Stress x EE MA

Supplementary Material

Setting-up

Loading packages

pacman::p_load(tidyverse, 
               here,
               metafor,
               clubSandwich,
               orchaRd, 
               MuMIn, 
               patchwork,
               GoodmanKruskal,
               networkD3,
               ggplot2,
               visdat,
               ggalluvial,
               ggthemr,
               cowplot)
# needed for model selection using MuMIn within metafor
eval(metafor:::.MuMIn)

Loading data and functions

dat <- read_csv(here("Data","Data_raw.csv"))
# Load custom function to extract data 
source(here("R/Functions.R")) 

Data exploration

General

#Number of effect sizes
length(unique(dat$ES_ID))  

#Number of studies
length(unique(dat$Study_ID))

#Publication years
min(dat$Year_published) 
max(dat$Year_published)

Explore associations among predictor variables

plot_missing <- vis_miss(dat) +
  theme(plot.title = element_text(hjust = 0.5, vjust = 3), 
        plot.margin = margin(t = 0.5, r = 3, b = 1, l = 1, unit = "cm")) +
  ggtitle("Missing data in the selected predictors") #no missing values

plot_missing

#useGoodman and Kruskal’s τ measure of association between categorical predictor variables (function from package GoodmanKruskal: https://cran.r-project.org/web/packages/GoodmanKruskal/vignettes/GoodmanKruskal.html)
#GKmatrix <- GKtauDataframe(subset(dat, select = c("Sex", "Type_assay", "Learning_vs_memory", #"Type_reinforcement",  "Type_stress_exposure", "Age_stress_exposure", "Stress_duration", #"EE_social_HR", "EE_exercise", "Age_EE_exposure", "Exposure_order", "Age_assay")))
#plot(GKmatrix)

#simple pairwise contingency tables
# table(dat$Type_assay, dat$Type_reinforcement) 
# table(dat$Age_stress_exposure, dat$Age_EE_exposure) 
# table(dat$Type_stress_exposure, dat$Age_stress_exposure)
# table(dat$Type_stress_exposure, dat$Age_assay)
# table(dat$Type_stress_exposure, dat$Stress_duration)

Alluvial diagrams

#A. subjects info: species-strain-sex

freq_A <- as.data.frame(table(dat$Sex, dat$Common_species, dat$Strain)) %>% rename(Sex = Var1, Species = Var2, Strain = Var3) #make a data frame of frequencies for three selected variables
is_alluvia_form(as.data.frame(freq_A), axes = 1:3, silent = TRUE)

p1 <- ggplot(data = freq_A,
  aes(axis1 = Sex, axis2 = Species, axis3 = Strain, y = Freq)) +
  geom_alluvium(aes(fill = Sex)) +
  geom_flow() +
  geom_stratum(aes(fill = Sex)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold"),
        plot.margin = unit(c(1, 1, 0, 1), "cm")) +
  scale_x_discrete(limits = c("Sex", "Species", "Strain"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("A  study subjects")

p1

#B. EE info: type-exercise-social EE

freq_B <- as.data.frame(table(dat$Type_EE_exposure, dat$EE_exercise, dat$EE_social)) %>% rename(Type_EE = Var1, EE_exercise = Var2, EE_social = Var3) #make a data frame of frequencies for three selected variables
is_alluvia_form(as.data.frame(freq_B), axes = 1:3, silent = TRUE)

p2 <- ggplot(data = freq_B,
  aes(axis1 = Type_EE, axis2 = EE_exercise, axis3 = EE_social, y = Freq)) +
  geom_alluvium(aes(fill = Type_EE)) +
  geom_flow() +
  geom_stratum(aes(fill = Type_EE)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold"),
        plot.margin = unit(c(1, 1, 0, 1), "cm")) +
  scale_x_discrete(limits = c("Type", "Exercise", "Social"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("C  environmental enrichment")

p2

#C. stress info: age-duration-type stress

freq_C <- as.data.frame(table(dat$Age_stress_exposure, dat$Stress_duration, dat$Type_stress_exposure)) %>% rename(Age_stress = Var1, Duration_stress = Var2, Type_stress = Var3) #make a data frame of frequencies for three selected variables
is_alluvia_form(as.data.frame(freq_C), axes = 1:3, silent = TRUE)

p3 <- ggplot(data = freq_C,
  aes(axis1 = Age_stress, axis2 = Duration_stress, axis3 = Type_stress, y = Freq)) +
  geom_alluvium(aes(fill = Age_stress)) +
  geom_flow() +
  geom_stratum(aes(fill = Age_stress)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold"),
        plot.margin = unit(c(1, 1, 0, 1), "cm")) +
  scale_x_discrete(limits = c("Age", "Duration", "Type"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("B  stress exposure")

p3

#D. assay info: L/M-type-reinforcement

freq_D <- as.data.frame(table(dat$Learning_vs_memory, dat$Type_assay, dat$Type_reinforcement)) %>% rename(Learning_Memory = Var1, Type = Var2, Reinforcement = Var3) #make a data frame of frequencies for three selected variables
is_alluvia_form(as.data.frame(freq_D), axes = 1:3, silent = TRUE)

p4 <- ggplot(data = freq_D,
  aes(axis1 = Learning_Memory, axis2 = Type, axis3 = Reinforcement, y = Freq)) +
  geom_alluvium(aes(fill = Learning_Memory)) +
  geom_flow() +
  geom_stratum(aes(fill = Learning_Memory)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold"),
        plot.margin = unit(c(1, 1, 0, 1), "cm")) +
  scale_x_discrete(limits = c("Learning_Memory", "Type", "Reinforcement"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("D  cognitive assay")

p4

#p1 + scale_fill_brewer(palette = "Set3") #Pastel1

(p1 + scale_fill_brewer(palette = "Set3")) / (p2 + scale_fill_brewer(palette = "Set3")) / (p3 + scale_fill_brewer(palette = "Set3")) / (p4 + scale_fill_brewer(palette = "Set3")) + plot_layout(ncol = 1, heights = c(1,1,1,1,1))

#ggsave(file = "./figs/Alluvial_diagrams_v0.pdf", width = 10, height = 12, units = "cm", dpi = 300, scale = 2, device = cairo_pdf)

Data organisation

Removing study with negative values, getting effect sizes from function, ‘flipping’ effect sizes so that all effect sizes are higher values = individuals do better and learning/memory, shifting negative values to positive as lnRR cannot use negative values, assigining human readable terms, and creating VCV of variance

#this code is to process the data using "Data_raw". Processed data after this step was loaded above
#removing study with negative values as these are unable to be used for lnRR
dat <- droplevels(dat[!dat$First_author == 'Wang',])

# #Getting effect sizes
# effect_size <- effect_set(CC_n = "CC_n", CC_mean = "CC_mean", CC_SD = "CC_SD",
#                           EC_n = "EC_n", EC_mean = "EC_mean" , EC_SD ="EC_SD",
#                           CS_n = "CS_n", CS_mean = "CS_mean", CS_SD = "CS_SD",
#                           ES_n = "ES_n", ES_mean = "ES_mean", ES_SD = "ES_SD",
#                           data = dat)
# #'pure' effect sizes
# effect_size2 <- effect_set2(CC_n = "CC_n", CC_mean = "CC_mean", CC_SD = "CC_SD",
#                           EC_n = "EC_n", EC_mean = "EC_mean" , EC_SD ="EC_SD",
#                           CS_n = "CS_n", CS_mean = "CS_mean", CS_SD = "CS_SD",
#                           ES_n = "ES_n", ES_mean = "ES_mean", ES_SD = "ES_SD",
#                           data = dat) 
#rounding down integer sample sizes 
dat$CC_n <- floor(dat$CC_n)
dat$EC_n <- floor(dat$EC_n)
dat$CS_n <- floor(dat$CS_n)
dat$ES_n <- floor(dat$CS_n)

# 'Focal' effect_size 
effect_size <- with(dat, mapply(effect_set, 
                      CC_n ,
                      CC_mean, 
                      CC_SD,
                      EC_n, 
                      EC_mean, 
                      EC_SD,
                      CS_n, 
                      CS_mean, 
                      CS_SD,
                      ES_n, 
                      ES_mean, 
                      ES_SD,
                      percent = Response_percent,
                      SIMPLIFY = FALSE))
effect_size <- map_dfr(effect_size, I)

# 'Pairwise' effect size

 effect_size2 <- with(dat, mapply(effect_set2, 
                      CC_n ,
                      CC_mean, 
                      CC_SD,
                      EC_n, 
                      EC_mean, 
                      EC_SD,
                      CS_n, 
                      CS_mean, 
                      CS_SD,
                      ES_n, 
                      ES_mean, 
                      ES_SD,
                      percent = Response_percent,
                      SIMPLIFY = FALSE))
effect_size2 <- map_dfr(effect_size2, I)

#Removing missing effect sizes
dim(dat)
## [1] 92 73
full_info <- which(complete.cases(effect_size) == TRUE)

# adding effect sizes as column
dat <- bind_cols(dat, effect_size, effect_size2)
dat <- dat[full_info, ]

#Flipping 'lower is better' effect sizes
#flipping lnRR for values where higher = worse
dat$lnRR_Ea <- ifelse(dat$Response_direction == 2, dat$lnRR_E*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_E))
# currently NAswhich causes error
dat$lnRR_Sa  <- ifelse(dat$Response_direction == 2, dat$lnRR_S*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_S)) # currently NAswhich causes error
dat$lnRR_ESa <-  ifelse(dat$Response_direction == 2, dat$lnRR_ES*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_ES)) # currently NAswhich causes error
#flipping 'pure effect sizes'
dat$lnRR_E2a <- ifelse(dat$Response_direction == 2, dat$lnRR_E2*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_E2)) # currently NAswhich causes error
dat$lnRR_S2a  <- ifelse(dat$Response_direction == 2, dat$lnRR_S2*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_S2)) # currently NAswhich causes error
dat$lnRR_ES2a <-  ifelse(dat$Response_direction == 2, dat$lnRR_ES2*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_ES2)) # currently NAswhich causes error
dat$lnRR_E3a <-  ifelse(dat$Response_direction == 2, dat$lnRR_E3*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_E3)) # currently NAswhich causes error
dat$lnRR_S3a <-  ifelse(dat$Response_direction == 2, dat$lnRR_S3*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_S3)) # currently NAswhich causes error

#flipping SMD
dat$SMD_Ea <- ifelse(dat$Response_direction == 2, dat$SMD_E*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$SMD_E)) # currently NAswhich causes error
dat$SMD_Sa  <- ifelse(dat$Response_direction == 2, dat$SMD_S*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$SMD_S)) # currently NAswhich causes error
dat$SMD_ESa <-  ifelse(dat$Response_direction == 2, dat$SMD_ES*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$SMD_ES))

#assigning human readable terms
dat <- dat %>% mutate(Type_assay = case_when(Type_assay == 1 ~ "Habituation",
                                                Type_assay == 2 ~ "Conditioning",
                                                Type_assay == 3 ~ "Recognition", 
                                                Type_assay == 4 ~ "Unclear"),
                      Learning_vs_memory = case_when(Learning_vs_memory == 1 ~ "Learning",
                                                     Learning_vs_memory == 2 ~ "Memory", 
                                                     Learning_vs_memory == 3 ~ "Habituation"),
                      Type_reinforcement = case_when(Type_reinforcement== 1 ~"Appetitive",
                                                         Type_reinforcement== 2 ~ "Aversive",
                                                         Type_reinforcement== 3 ~ "Not applicable",
                                                         Type_reinforcement== 4 ~ "Unclear"),
                      Type_stress_exposure = case_when(Type_stress_exposure == 1 ~ "Density",
                                                       Type_stress_exposure == 2 ~ "Scent",
                                                       Type_stress_exposure == 3 ~ "Shock",
                                                       Type_stress_exposure == 4 ~ "Exertion",
                                                       Type_stress_exposure == 5 ~ "Restraint",
                                                       Type_stress_exposure == 6 ~ "MS",
                                                       Type_stress_exposure == 7 ~ "Circadian rhythm",
                                                       Type_stress_exposure == 8 ~ "Noise",
                                                       Type_stress_exposure == 9 ~ "Other",
                                                       Type_stress_exposure == 10 ~ "Combination",
                                                       Type_stress_exposure == 11 ~ "unclear"), 
                      Age_stress_exposure = case_when(Age_stress_exposure == 1 ~ "Prenatal",
                                                      Age_stress_exposure == 2 ~ "Early postnatal",
                                                      Age_stress_exposure == 3 ~ "Adolescent",
                                                      Age_stress_exposure == 4 ~ "Adult",
                                                      Age_stress_exposure == 5 ~ "Unclear"),
                      Stress_duration = case_when(Stress_duration == 1 ~ "Acute",
                                                  Stress_duration == 2 ~ "Chronic",
                                                  Stress_duration == 3 ~ "Intermittent",
                                                  Stress_duration == 4 ~ "Unclear"),
                      EE_social = case_when(EE_social == 1 ~ "Social",
                                            EE_social== 2 ~ "Non-social", 
                                            EE_social == 3 ~ "Unclear"), 
                      EE_exercise = case_when(EE_exercise == 1 ~ "Exercise", 
                                              EE_exercise == 2 ~ "No exercise"),
                      Age_EE_exposure = case_when(Age_EE_exposure == 1 ~ "Prenatal", 
                                                  Age_EE_exposure == 2 ~ "Early postnatal",
                                                  Age_EE_exposure == 3 ~ "Adolescent", 
                                                  Age_EE_exposure == 4 ~ "Adult",
                                                  Age_EE_exposure == 5 ~ "Unclear"),
                      Exposure_order = case_when(Exposure_order == 1 ~ "Stress first",
                                                      Exposure_order == 2 ~ "Enrichment first",
                                                      Exposure_order == 3 ~ "Concurrently", 
                                                      Exposure_order == 4 ~ "Unclear"),
                      Age_assay = case_when(Age_assay == 1 ~ "Early postnatal",
                                            Age_assay == 2 ~ "Adolescent",
                                            Age_assay == 3 ~ "Adult", 
                                            Age_assay == 4 ~ "Unclear"),
                      Sex = case_when(Sex == 1 ~ "Female", 
                                      Sex == 2 ~ "Male", 
                                      Sex == 3 ~ "Mixed", 
                                      Sex == 4 ~ "Unclear"),
                      Type_EE_exposure = case_when(Type_EE_exposure == 1 ~ "Nesting material",
                                                      Type_EE_exposure == 2 ~ "Objects",
                                                      Type_EE_exposure == 3 ~ "Cage complexity", 
                                                      Type_EE_exposure == 4 ~ "Wheel/trademill",
                                                      Type_EE_exposure == 5 ~ "Combination",
                                                      Type_EE_exposure == 6 ~ "Other", 
                                                      Type_EE_exposure == 7 ~ "Unclear"))

#making variance VCVs
VCV_E <- impute_covariance_matrix(vi = dat$lnRRV_E, cluster = dat$Study_ID, r = 0.5)
VCV_S <- impute_covariance_matrix(vi = dat$lnRRV_S, cluster = dat$Study_ID, r = 0.5)
VCV_ES <- impute_covariance_matrix(vi = dat$lnRRV_ES, cluster = dat$Study_ID, r = 0.5)

VCV_Ea <- impute_covariance_matrix(vi = dat$SMDV_E, cluster = dat$Study_ID, r = 0.5)
VCV_Sa <- impute_covariance_matrix(vi = dat$SMDV_S, cluster = dat$Study_ID, r = 0.5)
VCV_ESa <- impute_covariance_matrix(vi = dat$SMDV_ES, cluster = dat$Study_ID, r = 0.5)

#write.csv(dat, file = here("Data", 'Data_processed.csv'), row.names = TRUE)

Modelling with lnRR

Environmental enrichment

Meta-analysis

Learning and memory are significantly improved when housed with environmnetal enrichment

#dat <- read_csv(here("Data","Data_processed.csv"))

mod_E0 <- rma.mv(yi = lnRR_Ea, V = VCV_E, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain),
                 test = "t",
                 data = dat)
summary(mod_E0) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -9.7836   19.5672   27.5672   37.6106   28.0323   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0082  0.0905     30     no  Study_ID 
## sigma^2.2  0.0345  0.1858     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 809.2712, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.1860  0.0320  5.8116  91  <.0001  0.1224  0.2496  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_E0) 
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 9.310673e-01 1.786605e-01 7.524068e-01 6.097041e-10
orchard_plot(mod_E0, mod = "Int", xlab = "lnRR", alpha=0.4) +  # Orchard plot 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5)+ # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2)+ # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_colour_manual(values = "darkorange")+ # change colours
  scale_fill_manual(values="darkorange")+ 
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 

Meta-regression: uni-moderator

Type of assay

The type of learning/memory response

dat1 <- filter(dat, Type_assay %in% c("Recognition", "Habituation", "Conditioning"))
VCV_E1 <- impute_covariance_matrix(vi = dat1$lnRRV_E, cluster = dat$Study_ID, r = 0.5)

mod_E1 <- rma.mv(yi = lnRR_Ea, V = VCV_E1, mod = ~Type_assay-1, random = list(~1|Study_ID,
                                                                                  ~1|ES_ID,
                                                                                  ~1|Strain),
                 test = "t",
                 data = dat1)

summary(mod_E1)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -7.8496   15.6993   27.6993   42.6311   28.7237   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0099  0.0995     30     no  Study_ID 
## sigma^2.2  0.0321  0.1792     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 661.8903, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 12.7993, p-val < .0001
## 
## Model Results:
## 
##                         estimate      se    tval  df    pval    ci.lb   ci.ub 
## Type_assayConditioning    0.2167  0.0351  6.1650  89  <.0001   0.1468  0.2865 
## Type_assayHabituation     0.1821  0.1147  1.5886  89  0.1157  -0.0457  0.4100 
## Type_assayRecognition     0.0554  0.0640  0.8659  89  0.3889  -0.0717  0.1826 
##  
## Type_assayConditioning  *** 
## Type_assayHabituation 
## Type_assayRecognition 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E1) 
##   R2_marginal R2_coditional 
##    0.07376134    1.00000000
Learning_E <- orchard_plot(mod_E1, mod = "Type_assay", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Learning_E

Learning vs Memory

Is the assay broadly measuring learning or memory?

mod_E2 <-  rma.mv(yi = lnRR_Ea, V = VCV_E, mod = ~Learning_vs_memory-1, random = list(~1|Study_ID,
                                                                                        ~1|ES_ID,
                                                                                        ~1|Strain),
                  test = "t",
                  data = dat)

summary(mod_E2) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -9.3793   18.7586   28.7586   41.2577   29.4729   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0076  0.0873     30     no  Study_ID 
## sigma^2.2  0.0340  0.1843     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 802.5794, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 18.2861, p-val < .0001
## 
## Model Results:
## 
##                             estimate      se    tval  df    pval   ci.lb 
## Learning_vs_memoryLearning    0.2303  0.0450  5.1227  90  <.0001  0.1410 
## Learning_vs_memoryMemory      0.1624  0.0355  4.5713  90  <.0001  0.0919 
##                              ci.ub 
## Learning_vs_memoryLearning  0.3197  *** 
## Learning_vs_memoryMemory    0.2330  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E2) 
##   R2_marginal R2_coditional 
##    0.02572583    1.00000000
LvsM_E<- orchard_plot(mod_E2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

LvsM_E

Type of reinforcement

The type of cue used

dat2 <- filter(dat, Type_reinforcement %in% c("Appetitive", "Aversive", "Not applicable"))
VCV_E2 <- impute_covariance_matrix(vi = dat2$lnRRV_E, cluster = dat2$Study_ID, r = 0.5)
mod_E3 <- rma.mv(yi = lnRR_Ea, V = VCV_E2, mod = ~ Type_reinforcement-1, random = list(~1|Study_ID,
                                                                                            ~1|ES_ID,
                                                                                            ~1|Strain),
                 test = "t",
                 data = dat2)

summary(mod_E3)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -7.8702   15.7405   27.7405   42.6723   28.7649   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0109  0.1046     30     no  Study_ID 
## sigma^2.2  0.0319  0.1787     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 764.5984, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 12.4138, p-val < .0001
## 
## Model Results:
## 
##                                   estimate      se    tval  df    pval    ci.lb 
## Type_reinforcementAppetitive        0.2175  0.0726  2.9942  89  0.0036   0.0732 
## Type_reinforcementAversive          0.2191  0.0410  5.3456  89  <.0001   0.1377 
## Type_reinforcementNot applicable    0.0812  0.0560  1.4504  89  0.1505  -0.0300 
##                                    ci.ub 
## Type_reinforcementAppetitive      0.3618   ** 
## Type_reinforcementAversive        0.3006  *** 
## Type_reinforcementNot applicable  0.1924      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E3) 
##   R2_marginal R2_coditional 
##    0.07720103    1.00000000
Reinforcement_E <-orchard_plot(mod_E3, mod = "Type_reinforcement", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Reinforcement_E

Social enrichment

Does EE also include a manipulation of social environment? Note that we excluded any studies that exclusively used social enrichment.s

dat5 <- filter(dat, EE_social %in% c("Social", "Non-social"))
VCV_E5 <- impute_covariance_matrix(vi = dat5$lnRRV_E, cluster = dat$Study_ID, r = 0.5)
  
mod_E4<- rma.mv(yi = lnRR_Ea, V = VCV_E5, mod = ~EE_social-1, random = list(~1|Study_ID, 
                                                                             ~1|ES_ID,
                                                                             ~1|Strain),
                test = "t",data = dat5)

summary(mod_E4)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -9.3254   18.6507   28.6507   41.1498   29.3650   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0095  0.0974     30     no  Study_ID 
## sigma^2.2  0.0340  0.1844     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 801.5313, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 17.0110, p-val < .0001
## 
## Model Results:
## 
##                      estimate      se    tval  df    pval   ci.lb   ci.ub 
## EE_socialNon-social    0.1413  0.0498  2.8349  90  0.0057  0.0423  0.2403   ** 
## EE_socialSocial        0.2204  0.0432  5.0976  90  <.0001  0.1345  0.3063  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E4) 
##   R2_marginal R2_coditional 
##    0.03377455    1.00000000
Social_E <-orchard_plot(mod_E4, mod = "EE_social", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Social_E 

Exercise enrichment

Does the form of enrichment include exercise through a running wheel or treadmill?

mod_E5<- rma.mv(yi = lnRR_Ea, V = VCV_E, mod = ~EE_exercise-1, random = list(~1|Study_ID,
                                                                               ~1|ES_ID,
                                                                               ~1|Strain),
                test = "t",
                data = dat)

summary(mod_E5)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -10.0303   20.0606   30.0606   42.5597   30.7749   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0096  0.0980     30     no  Study_ID 
## sigma^2.2  0.0345  0.1857     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 807.7169, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 16.1666, p-val < .0001
## 
## Model Results:
## 
##                         estimate      se    tval  df    pval   ci.lb   ci.ub 
## EE_exerciseExercise       0.1849  0.0407  4.5464  90  <.0001  0.1041  0.2657 
## EE_exerciseNo exercise    0.1900  0.0556  3.4151  90  0.0010  0.0795  0.3005 
##  
## EE_exerciseExercise     *** 
## EE_exerciseNo exercise  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E5) 
##   R2_marginal R2_coditional 
##  0.0001360993  0.9999999987
Exercise_E <-orchard_plot(mod_E5, mod = "EE_exercise", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Exercise_E

Age of enrichment

The age at which the individuals were exposed to environmental enrichment.

dat6 <- filter(dat, Age_EE_exposure %in% c("Adult", "Adolescent"))
VCV_E6 <- impute_covariance_matrix(vi = dat6$lnRRV_E, cluster = dat6$Study_ID, r = 0.5)


mod_E6 <- rma.mv(yi = lnRR_Ea, V = VCV_E6, mod = ~Age_EE_exposure-1, random = list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                 test = "t",
                 data = dat6)

summary(mod_E6) 
## 
## Multivariate Meta-Analysis Model (k = 88; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -6.9490   13.8980   23.8980   36.1698   24.6480   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0078  0.0885     29     no  Study_ID 
## sigma^2.2  0.0327  0.1808     88     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 86) = 782.6092, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 86) = 18.9060, p-val < .0001
## 
## Model Results:
## 
##                            estimate      se    tval  df    pval   ci.lb   ci.ub 
## Age_EE_exposureAdolescent    0.1799  0.0370  4.8553  86  <.0001  0.1062  0.2535 
## Age_EE_exposureAdult         0.2340  0.0620  3.7734  86  0.0003  0.1107  0.3573 
##  
## Age_EE_exposureAdolescent  *** 
## Age_EE_exposureAdult       *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E6) 
##   R2_marginal R2_coditional 
##    0.01127347    1.00000000
Age_E<- orchard_plot(mod_E6, mod = "Age_EE_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10))  

Age_E

Meta-regression: multi-moderator model

# filter data so that all K < 5 are removed
dat_Efm <- dat %>%
  filter(Type_assay %in% c("Recognition", "Habituation", "Conditioning"),
         Type_reinforcement %in% c("Appetitive", "Aversive", "Not applicable"),
         EE_social %in% c("Social", "Non-social"), 
         Age_EE_exposure %in% c("Adult", "Adolescent"))

VCV_Efm <- impute_covariance_matrix(vi = dat_Efm$lnRRV_E, cluster = dat_Efm$Study_ID, r = 0.5)
                 
mod_Efm <- rma.mv(yi = lnRR_Sa, V = VCV_Efm, mod = ~Type_assay-1 + Learning_vs_memory + Type_reinforcement + EE_social + EE_exercise + Age_EE_exposure , random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_Efm)
#summary(mod_Efm)
#r2_ml(mod_Efm) 

res_Efm <- dredge(mod_Efm, trace=2)
saveRDS(res_Efm, file = here("Rdata", "res_Efm.rds"))
# also saving the full model and data
saveRDS(mod_Efm, file = here("Rdata", "mod_Efm.rds"))
saveRDS(dat_Efm, file = here("Rdata", "dat_Efm.rds"))
dat_Efm <- readRDS(file = here("Rdata", "dat_Efm.rds"))
mod_Efm <- readRDS(file = here("Rdata", "mod_Efm.rds"))
res_Efm <- readRDS(file = here("Rdata", "res_Efm.rds"))
res_Efm2<- subset(res_Efm, delta <= 6, recalc.weights=FALSE)
importance(res_Efm2)
##                      Age_EE_exposure Type_assay EE_exercise EE_social
## Sum of weights:      0.63            0.36       0.16        0.14     
## N containing models:   11              10          5           5     
##                      Learning_vs_memory Type_reinforcement
## Sum of weights:      0.10               0.07              
## N containing models:    4                  4

Publication bias & sensitivity analysis

Publication bias tests

# funnel plot
Funnel_E<-funnel(mod_Efm, xlab = "lnRR", ylab = "Standard Error")

Funnel_E
##               x         y slab
## 1  -0.213497315 0.2433196    1
## 2  -0.173536758 0.3659399    2
## 3  -0.276500151 0.3944014    3
## 4  -0.102871039 0.2917889    4
## 5  -0.431860581 0.2607712    5
## 6  -0.220765234 0.3358620    6
## 7   0.123313002 0.2903953    7
## 8  -0.130681176 0.2203808    8
## 9  -0.377176406 0.2025027    9
## 10 -0.254349014 0.2677791   10
## 11  0.454080733 0.2772217   11
## 12 -0.561458736 0.3476971   12
## 13  0.673795583 0.3220059   13
## 14 -0.484338897 0.2614931   14
## 15 -0.424367932 0.2470021   15
## 16 -0.679891992 0.2875585   16
## 17 -0.330715414 0.2635523   17
## 18 -0.001942387 0.2036091   18
## 19  0.067763438 0.2030761   19
## 20  0.038435858 0.2546488   20
## 21  0.107274934 0.2004581   21
## 22  0.134554017 0.1891676   22
## 23  0.077353257 0.1941812   23
## 24  0.069783755 0.1959847   24
## 25 -0.147595466 0.1654517   25
## 26 -0.028117212 0.2036493   26
## 27  0.009803768 0.2086964   27
## 28  0.025882437 0.2246031   28
## 29  0.310018621 0.2270771   29
## 30 -0.221075048 0.2370404   30
## 31 -0.021333180 0.2042171   31
## 32 -0.024739571 0.2039836   32
## 33  0.024018193 0.2070339   33
## 34  0.055386135 0.2082554   34
## 35  0.011204912 0.2118017   35
## 36  0.269761044 0.2234418   36
## 37 -0.027431091 0.2616730   37
## 38  0.097383495 0.2201198   38
## 39  0.129821024 0.2274078   39
## 40  0.117013214 0.2118235   40
## 41  0.134604274 0.2121146   41
## 42  0.131395595 0.1826574   42
## 43  0.199838051 0.1848095   43
## 44  0.163896645 0.1854228   44
## 45  0.232145273 0.1907385   45
## 46  0.201251496 0.1853235   46
## 47  0.115420490 0.1933631   47
## 48  0.191292338 0.1971748   48
## 49  0.025102696 0.2057490   49
## 50 -0.090383649 0.2041439   50
## 51  0.063283209 0.1681331   51
## 52  0.404796892 0.2138700   52
## 53  0.496027289 0.2117179   53
## 54 -0.258236598 0.2166538   54
## 55 -0.203668502 0.2234975   55
## 56 -0.095266297 0.2026806   56
## 57 -0.127501308 0.2558545   57
## 58 -0.574954185 0.3729284   58
## 59 -0.201834800 0.2182224   59
## 60  0.238276751 0.2681091   60
## 61  0.474869293 0.4008041   61
## 62 -0.696237252 0.4468660   62
## 63 -0.432296145 0.2320240   63
## 64 -0.371862448 0.3217018   64
## 65  0.184084317 0.3001181   65
## 66 -0.360884079 0.3144041   66
## 67 -0.335056871 0.1984419   67
## 68 -0.201138369 0.1967313   68
## 69 -0.035550172 0.2030628   69
## 70 -0.102430545 0.3971746   70
## 71  0.508752824 0.2946114   71
## 72  0.438694650 0.2782131   72
## 73  0.467023121 0.3276902   73
## 74 -0.206361779 0.2538102   74
## 75 -0.092873206 0.2153932   75
## 76  0.064099073 0.2602010   76
## 77 -0.031175240 0.2103472   77
## 78 -0.217869097 0.2137524   78
## 79 -0.082144895 0.2104206   79
## 80  0.015058876 0.3001287   80
## 81  0.130606403 0.1865150   81
## 82  0.311232540 0.4672652   82
## 83 -0.162882220 0.2247811   83
## 84  0.289463536 0.5530860   84
## 85  0.345157695 0.2079106   85
## 86 -1.277954082 0.7053474   86
## 87  0.024867129 0.2317755   87
## 88  0.107414213 0.3971615   88
#year published was scaled previously  under stress PB

dat_Efm$sqrt_inv_e_n <- with(dat_Efm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

PB_MR_E<- rma.mv(lnRR_Sa, lnRRV_S, mods = ~1 + sqrt_inv_e_n +  Learning_vs_memory + Year_published + Type_assay + Type_reinforcement + EE_social + EE_exercise + Age_stress_exposure, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain), method = "REML", test = "t", 
    data = dat_Efm)

estimates_PB_MR_E<- estimates.CI(PB_MR_E)
estimates_PB_MR_E
##                              estimate         mean        lower       upper
## 1                             intrcpt -8.327518592 -48.07119985 31.41616266
## 2                        sqrt_inv_e_n -0.109285094  -0.87720366  0.65863347
## 3            Learning_vs_memoryMemory -0.069013676  -0.16279041  0.02476306
## 4                      Year_published  0.004107572  -0.01566019  0.02387534
## 5               Type_assayHabituation -0.581287761  -0.96454156 -0.19803397
## 6               Type_assayRecognition -0.138892812  -0.43927047  0.16148484
## 7          Type_reinforcementAversive  0.180286955  -0.12948049  0.49005440
## 8    Type_reinforcementNot applicable  0.347157232  -0.07216803  0.76648249
## 9                     EE_socialSocial  0.049422109  -0.16084227  0.25968649
## 10             EE_exerciseNo exercise -0.040127169  -0.27510528  0.19485094
## 11           Age_stress_exposureAdult -0.164507296  -0.53782472  0.20881013
## 12 Age_stress_exposureEarly postnatal -0.070780676  -0.45223223  0.31067088
## 13        Age_stress_exposurePrenatal -0.142070893  -0.53156091  0.24741912
#no effect of inv_effective sample size or year published

Leave-one-out analysis

dat$Study_ID <- as.factor(dat$Study_ID)

LeaveOneOut_effectsize <- list()
for(i in 1:length(levels(dat$Study_ID))){
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Ea, V = lnRRV_E, 
                                        random = list(~1 | Study_ID,~1| ES_ID, ~1 | Strain), 
                                        method = "REML", data = dat[dat$Study_ID
                                                                    != levels(dat$Study_ID)[i], ])}


# writing function for extracting est, ci.lb, and ci.ub from all models
est.func <- function(mod_E0){
  df <- data.frame(est = mod_E0$b, lower = mod_E0$ci.lb, upper = mod_E0$ci.ub)
  return(df)
}


#using dplyr to form data frame
MA_CVR_E <- lapply(LeaveOneOut_effectsize, function(x) est.func(x))%>% bind_rows %>% mutate(left_out = levels(dat$Study_ID))


saveRDS(MA_CVR_E,file = here("Rdata", "MA_CVR_E.rds"))
#telling ggplot to stop reordering factors
MA_CVR_E <- readRDS(file = here("Rdata", "MA_CVR_E.rds"))

MA_CVR_E$left_out<- as.factor(MA_CVR_E$left_out)
MA_CVR_E$left_out<-factor(MA_CVR_E$left_out, levels = MA_CVR_E$left_out)


#plotting
leaveoneout_E <- ggplot(MA_CVR_E) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_E0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") + 
  ylab("lnRR, 95% CI") + 
  coord_flip() +
  theme(panel.grid.minor = element_blank())+
  theme_bw() + theme(panel.grid.major = element_blank()) +
  theme(panel.grid.minor.x = element_blank() ) +
  theme(axis.text.y = element_text(size = 6))

leaveoneout_E

dat$Study_ID <- as.integer(dat$Study_ID)

Stress

Intercept model

Learning and memory are significantly reduced due to stress. High heterogeneity

mod_S0 <- rma.mv(yi = lnRR_Sa, V = VCV_S, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain),
                 test = "t", 
                 data = dat)

summary(mod_S0) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -14.1156   28.2312   36.2312   46.2747   36.6964   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0099  0.0993     30     no  Study_ID 
## sigma^2.2  0.0377  0.1941     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 946.9234, p-val < .0001
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb    ci.ub 
##  -0.1052  0.0337  -3.1217  91  0.0024  -0.1721  -0.0383  ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_S0) 
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 9.376124e-01 1.946168e-01 7.429955e-01 1.975455e-10
orchard_plot(mod_S0, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 

Meta-regression

Uni-moderator metaregression

Type of assay

The type of learning/memory response

dat$Type_assay<-as.factor(dat$Type_assay)

VCV_S1 <- impute_covariance_matrix(vi = dat1$lnRRV_S, cluster = dat$Study_ID, r = 0.5)


mod_S1 <- rma.mv(yi = lnRR_Sa, V = VCV_S1, mod = ~Type_assay-1, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                 test = "t",
                 data = dat1)

summary(mod_S1)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -9.8028   19.6055   31.6055   46.5374   32.6299   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0161  0.1271     30     no  Study_ID 
## sigma^2.2  0.0279  0.1671     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 723.4973, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 6.7053, p-val = 0.0004
## 
## Model Results:
## 
##                         estimate      se     tval  df    pval    ci.lb    ci.ub 
## Type_assayConditioning   -0.0981  0.0375  -2.6192  89  0.0104  -0.1725  -0.0237 
## Type_assayHabituation    -0.4615  0.1126  -4.0969  89  <.0001  -0.6853  -0.2377 
## Type_assayRecognition    -0.0534  0.0645  -0.8287  89  0.4095  -0.1816   0.0747 
##  
## Type_assayConditioning    * 
## Type_assayHabituation   *** 
## Type_assayRecognition 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_S1) 
##   R2_marginal R2_coditional 
##     0.1853359     1.0000000
Learning_S <-orchard_plot(mod_S1, mod = "Type_assay", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Learning_S

Learning vs Memory

Is the assay broadly measuring learning or memory?

mod_S2 <-  rma.mv(yi = lnRR_Sa, V = VCV_S, mod = ~Learning_vs_memory-1, random = list(~1|Study_ID,
                                                                                        ~1|ES_ID,
                                                                                        ~1|Strain),
                  test = "t",
                  data = dat)

summary(mod_S2) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -14.5281   29.0562   39.0562   51.5553   39.7705   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0094  0.0970     30     no  Study_ID 
## sigma^2.2  0.0388  0.1969     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 946.3930, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 5.0145, p-val = 0.0086
## 
## Model Results:
## 
##                             estimate      se     tval  df    pval    ci.lb 
## Learning_vs_memoryLearning   -0.1211  0.0476  -2.5423  90  0.0127  -0.2157 
## Learning_vs_memoryMemory     -0.0974  0.0380  -2.5648  90  0.0120  -0.1728 
##                               ci.ub 
## Learning_vs_memoryLearning  -0.0265  * 
## Learning_vs_memoryMemory    -0.0219  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_S2) 
##   R2_marginal R2_coditional 
##   0.002776034   1.000000000
LvsM_S <- orchard_plot(mod_S2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

LvsM_S 

Type of reinforcement

The type of cue used

VCV_S2 <- impute_covariance_matrix(vi = dat2$lnRRV_S, cluster = dat$Study_ID, r = 0.5)

mod_S3 <- rma.mv(yi = lnRR_Sa, V = VCV_S2, mod = ~ Type_reinforcement-1, random = list(~1|Study_ID,
                                                                                            ~1|ES_ID,
                                                                                            ~1|Strain),
                 test = "t",
                 data = dat2)

summary(mod_S3)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -13.8810   27.7621   39.7621   54.6939   40.7864   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0103  0.1016     30     no  Study_ID 
## sigma^2.2  0.0387  0.1966     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 920.8439, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 3.7942, p-val = 0.0130
## 
## Model Results:
## 
##                                   estimate      se     tval  df    pval 
## Type_reinforcementAppetitive       -0.1846  0.0749  -2.4649  89  0.0156 
## Type_reinforcementAversive         -0.0730  0.0427  -1.7081  89  0.0911 
## Type_reinforcementNot applicable   -0.1172  0.0590  -1.9851  89  0.0502 
##                                     ci.lb    ci.ub 
## Type_reinforcementAppetitive      -0.3334  -0.0358  * 
## Type_reinforcementAversive        -0.1579   0.0119  . 
## Type_reinforcementNot applicable  -0.2345   0.0001  . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_S3) 
##   R2_marginal R2_coditional 
##     0.0366428     1.0000000
Reinforcement_S <-orchard_plot(mod_S3, mod = "Type_reinforcement", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Reinforcement_S

Type of stress

The type of stress manipulation

dat3 <- filter(dat, Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"))
VCV_S3 <- impute_covariance_matrix(vi = dat3$lnRRV_S, cluster = dat3$Study_ID, r = 0.5)

mod_S4 <- rma.mv(yi = lnRR_Sa, V = VCV_S3, mod = ~Type_stress_exposure-1, random = list(~1|Study_ID,
                                                                                         ~1|ES_ID,
                                                                                         ~1|Strain),
                 test = "t",
                 data = dat3)
summary(mod_S4) 
## 
## Multivariate Meta-Analysis Model (k = 85; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -11.4138   22.8276   36.8276   53.5887   38.3618   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0115  0.1071     25     no  Study_ID 
## sigma^2.2  0.0401  0.2002     85     no     ES_ID 
## sigma^2.3  0.0000  0.0000      4     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 81) = 897.4553, p-val < .0001
## 
## Test of Moderators (coefficients 1:4):
## F(df1 = 4, df2 = 81) = 2.8767, p-val = 0.0278
## 
## Model Results:
## 
##                                  estimate      se     tval  df    pval    ci.lb 
## Type_stress_exposureCombination   -0.0500  0.0892  -0.5605  81  0.5767  -0.2274 
## Type_stress_exposureMS            -0.0539  0.0560  -0.9630  81  0.3384  -0.1653 
## Type_stress_exposureNoise         -0.1203  0.1036  -1.1608  81  0.2491  -0.3265 
## Type_stress_exposureRestraint     -0.2101  0.0704  -2.9863  81  0.0037  -0.3501 
##                                    ci.ub 
## Type_stress_exposureCombination   0.1274     
## Type_stress_exposureMS            0.0575     
## Type_stress_exposureNoise         0.0859     
## Type_stress_exposureRestraint    -0.0701  ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_S4)
##   R2_marginal R2_coditional 
##    0.07029554    1.00000000
Stressor<- orchard_plot(mod_S4, mod = "Type_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Stressor

Age of stress

VCV_S3a <- impute_covariance_matrix(vi = dat$lnRRV_S, cluster = dat$Study_ID, r = 0.5)

mod_S5 <-rma.mv(yi = lnRR_Sa, V = VCV_S3a, mod = ~Age_stress_exposure-1, random = list(~1|Study_ID,
                                                                                       ~1|ES_ID,
                                                                                       ~1|Strain),
                test = "t",
                data = dat)
summary(mod_S5) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -12.4083   24.8166   38.8166   56.1579   40.2166   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0048  0.0694     30     no  Study_ID 
## sigma^2.2  0.0392  0.1979     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 88) = 881.1229, p-val < .0001
## 
## Test of Moderators (coefficients 1:4):
## F(df1 = 4, df2 = 88) = 4.3703, p-val = 0.0029
## 
## Model Results:
## 
##                                     estimate      se     tval  df    pval 
## Age_stress_exposureAdolescent         0.0074  0.1159   0.0641  88  0.9490 
## Age_stress_exposureAdult             -0.2279  0.0622  -3.6664  88  0.0004 
## Age_stress_exposureEarly postnatal   -0.0561  0.0435  -1.2891  88  0.2007 
## Age_stress_exposurePrenatal          -0.1145  0.0743  -1.5404  88  0.1271 
##                                       ci.lb    ci.ub 
## Age_stress_exposureAdolescent       -0.2228   0.2377      
## Age_stress_exposureAdult            -0.3514  -0.1044  *** 
## Age_stress_exposureEarly postnatal  -0.1425   0.0304      
## Age_stress_exposurePrenatal         -0.2621   0.0332      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_S5) 
##   R2_marginal R2_coditional 
##    0.09987307    1.00000000
Age_S <- orchard_plot(mod_S5, mod = "Age_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Age_S 

Stess duration

How long was the stress applied for (chronic = every day for 7 days or more)? This has the highest marginal R2

dat4 <- filter(dat, Stress_duration %in% c("Chronic", "Acute"))
VCV_S4 <- impute_covariance_matrix(vi = dat4$lnRRV_S, cluster = dat4$Study_ID, r = 0.5)

mod_S6 <-rma.mv(yi = lnRR_Sa, V = VCV_S4, mod = ~Stress_duration-1, random = list(~1|Study_ID,
                                                                                   ~1|ES_ID,
                                                                                   ~1|Strain),
                test = "t",
                data = dat4)
summary(mod_S6) 
## 
## Multivariate Meta-Analysis Model (k = 89; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -13.7898   27.5796   37.5796   49.9092   38.3204   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0062  0.0786     29     no  Study_ID 
## sigma^2.2  0.0391  0.1978     89     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 87) = 915.9393, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 87) = 6.6661, p-val = 0.0020
## 
## Model Results:
## 
##                         estimate      se     tval  df    pval    ci.lb    ci.ub 
## Stress_durationAcute      0.0135  0.0659   0.2045  87  0.8384  -0.1176   0.1445 
## Stress_durationChronic   -0.1360  0.0373  -3.6456  87  0.0005  -0.2101  -0.0618 
##  
## Stress_durationAcute 
## Stress_durationChronic  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_S6) 
##   R2_marginal R2_coditional 
##    0.08245896    1.00000000
Duration_S <- orchard_plot(mod_S6, mod = "Stress_duration", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Duration_S 

Meta-regression: multi-moderator model

#selecting moderator levels with k >=5
dat_Sfm <- dat %>%
  filter(Type_assay %in% c("Recognition", "Habituation", "Conditioning"),
         Type_reinforcement %in% c("Appetitive", "Aversive", "Not applicable"),
         Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"),
         Stress_duration %in% c("Chronic", "Acute"))

VCV_Sfm <- impute_covariance_matrix(vi = dat_Sfm$lnRRV_E, cluster = dat_Sfm$Study_ID, r = 0.5)
                 
mod_Sfm <- rma.mv(yi = lnRR_Sa, V = VCV_Sfm, mod = ~ Type_assay -1 + Learning_vs_memory + Type_reinforcement + Type_stress_exposure + Age_stress_exposure + Stress_duration, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_Sfm)
#summary(mod_Sfm)
#r2_ml(mod_Sfm) 

res_Sfm <- dredge(mod_Sfm, trace=2)
saveRDS(res_Sfm, file = here("Rdata", "res_Sfm.rds"))
# also saving the full model and data
saveRDS(mod_Sfm, file = here("Rdata", "mod_Sfm.rds"))
saveRDS(dat_Sfm, file = here("Rdata", "dat_Sfm.rds"))
dat_Sfm <- readRDS(file = here("Rdata", "dat_Sfm.rds"))
mod_Sfm <- readRDS(file = here("Rdata", "mod_Sfm.rds"))
res_Sfm <- readRDS(file = here("Rdata", "res_Sfm.rds"))
res_Sfm2<- subset(res_Sfm, delta <= 6, recalc.weights=FALSE)
importance(res_Sfm2) 
##                      Type_assay Stress_duration Type_stress_exposure
## Sum of weights:      0.91       0.88            0.22                
## N containing models:    7          6               2                
##                      Learning_vs_memory Age_stress_exposure Type_reinforcement
## Sum of weights:      0.16               0.04                0.04              
## N containing models:    2                  1                   1

Publication bias & sensitivity analysis

Publication bias

# funnel plot
Funnel_S <- funnel(mod_Sfm, xlab = "lnRR", ylab = "Standard Error")

Funnel_S
##               x          y slab
## 1  -0.113053232 0.36468860    1
## 2  -0.015439898 0.20610340    2
## 3  -0.248010857 0.31117978    3
## 4  -0.232933972 0.39558646    4
## 5  -0.042387513 0.28415283    5
## 6  -0.371377055 0.24616892    6
## 7  -0.299243891 0.33372726    7
## 8   0.044834345 0.28792375    8
## 9   0.126615612 0.17056933    9
## 10 -0.177388895 0.16829598   10
## 11 -0.247450735 0.25159343   11
## 12  0.460979011 0.26162092   12
## 13 -0.554560458 0.33539070   13
## 14  0.680693862 0.30867691   14
## 15 -0.471382295 0.25172620   15
## 16 -0.394493983 0.23638600   16
## 17 -0.650018042 0.27849269   17
## 18 -0.300841465 0.25362984   18
## 19  0.027931562 0.19059106   19
## 20  0.080720040 0.19033477   20
## 21  0.041790076 0.24429542   21
## 22  0.203537585 0.10208024   22
## 23  0.124260874 0.14463442   23
## 24  0.116691372 0.14706024   24
## 25 -0.121449889 0.14833999   25
## 26 -0.001971635 0.19205933   26
## 27 -0.003907599 0.20022315   27
## 28  0.120523677 0.21113795   28
## 29  0.404659861 0.21388659   29
## 30 -0.109516461 0.22302661   30
## 31 -0.001061616 0.19288079   31
## 32 -0.004468007 0.19262936   32
## 33  0.043909789 0.18566567   33
## 34  0.115869661 0.18965184   34
## 35 -0.011278194 0.09467548   35
## 36  0.175061863 0.18048966   36
## 37 -0.122130273 0.22610402   37
## 38  0.002684314 0.17636045   38
## 39  0.035121843 0.18537686   39
## 40 -0.156416652 0.15860530   40
## 41 -0.138825592 0.15899389   41
## 42  0.053065175 0.17943678   42
## 43  0.138424978 0.18297543   43
## 44  0.102483572 0.18367269   44
## 45  0.170732200 0.19143563   45
## 46  0.139838423 0.18382247   46
## 47  0.128377093 0.17993531   47
## 48  0.221166287 0.18370137   48
## 49  0.044994292 0.18507026   49
## 50 -0.255771493 0.15305831   50
## 51  0.040084756 0.13800224   51
## 52  0.117345431 0.17133452   52
## 53  0.225493174 0.17017071   53
## 54 -0.041531740 0.18021370   54
## 55  0.380896989 0.79456429   55
## 56  0.014647193 0.18626025   56
## 57 -0.239220751 0.20755664   57
## 58  0.200890800 0.26208602   58
## 59  0.478075273 0.39791794   59
## 60 -0.693031272 0.44427913   60
## 61 -0.446007512 0.22443320   61
## 62 -0.385573815 0.31627073   62
## 63  0.187438535 0.29281093   63
## 64 -0.357529861 0.30767463   64
## 65 -0.206432699 0.18443973   65
## 66 -0.072514197 0.18259794   66
## 67 -0.058864366 0.39854817   67
## 68  0.552319003 0.28778615   68
## 69  0.499178177 0.26916891   69
## 70  0.527506647 0.32337519   70
## 71 -0.162795600 0.23927168   71
## 72 -0.032389680 0.19746325   72
## 73  0.107665252 0.24604039   73
## 74  0.209204201 0.16352434   74
## 75 -0.174302918 0.19626869   75
## 76 -0.021661369 0.19202699   76
## 77 -0.024341578 0.23180570   77
## 78  0.052275983 0.18465565   78
## 79  0.331124135 0.46142545   79
## 80 -0.102398694 0.20899450   80
## 81  0.349947062 0.56232826   81
## 82  0.203444436 0.16285272   82
#calculating inv effective sample size for use in full meta-regression
dat_Sfm$sqrt_inv_e_n <- with(dat_Sfm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

#time lag bias and eggers regression
dat_Sfm$c_Year_published <- as.vector(scale(dat_Sfm$Year_published, scale = F))

PB_MR_S<- rma.mv(lnRR_Sa, lnRRV_S, mods = ~1 + sqrt_inv_e_n +  c_Year_published + Type_assay +Learning_vs_memory + Type_reinforcement + Type_stress_exposure + Age_stress_exposure, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain), 
                 method = "REML", 
                 test = "t", 
                 data = dat_Sfm,
                  control=list(optimizer="optim", optmethod="Nelder-Mead"))

estimates_PB_MR_S<- estimates.CI(PB_MR_S)
estimates_PB_MR_S
##                              estimate          mean       lower       upper
## 1                             intrcpt -0.0531565147 -0.80198435  0.69567132
## 2                        sqrt_inv_e_n  0.2644127720 -0.90453637  1.43336191
## 3                    c_Year_published -0.0004505009 -0.02602183  0.02512083
## 4               Type_assayHabituation -0.6533807423 -1.04044969 -0.26631179
## 5               Type_assayRecognition -0.1444088134 -0.45497533  0.16615770
## 6            Learning_vs_memoryMemory -0.0795090078 -0.18171343  0.02269541
## 7          Type_reinforcementAversive  0.0994462558 -0.12019678  0.31908929
## 8    Type_reinforcementNot applicable  0.2639213412 -0.10091990  0.62876258
## 9              Type_stress_exposureMS -0.0627988969 -0.76070352  0.63510573
## 10          Type_stress_exposureNoise  0.0539297569 -0.80795528  0.91581480
## 11      Type_stress_exposureRestraint -0.1701899069 -0.56841160  0.22803179
## 12           Age_stress_exposureAdult -0.1949729974 -0.56490678  0.17496079
## 13 Age_stress_exposureEarly postnatal -0.1465963343 -0.89061343  0.59742076
## 14        Age_stress_exposurePrenatal -0.1798979551 -0.63426064  0.27446473
#no effect of inv_effective sample size or year published

Leave-one-out sensitivity analysis

dat$Study_ID <- as.factor(dat$Study_ID)

LeaveOneOut_effectsize <- list()
for(i in 1:length(levels(dat$Study_ID))){
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Sa, V = lnRRV_S, 
                                        random = list(~1 | Study_ID,~1| ES_ID, ~1 | Strain), 
                                        method = "REML", data = dat[dat$Study_ID
                                                                    != levels(dat$Study_ID)[i], ])}


# writing function for extracting est, ci.lb, and ci.ub from all models
est.func <- function(mod_E0){
  df <- data.frame(est = mod_E0$b, lower = mod_E0$ci.lb, upper = mod_E0$ci.ub)
  return(df)
}


#using dplyr to form data frame
MA_CVR_S <- lapply(LeaveOneOut_effectsize, function(x) est.func(x))%>% bind_rows %>% mutate(left_out = levels(dat$Study_ID))

saveRDS(MA_CVR_S,file = here("Rdata", "MA_CVR_S.rds"))
MA_CVR_S <- readRDS(file = here("Rdata", "MA_CVR_S.rds"))

#telling ggplot to stop reordering factors
MA_CVR_S$left_out<- as.factor(MA_CVR_S$left_out)
MA_CVR_S$left_out<-factor(MA_CVR_S$left_out, levels = MA_CVR_S$left_out)

#plotting
leaveoneout_S <- ggplot(MA_CVR_S) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_S0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_S0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_S0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") + 
  ylab("lnRR, 95% CI") + 
  coord_flip() +
  theme(panel.grid.minor = element_blank())+
  theme_bw() + theme(panel.grid.major = element_blank()) +
  theme(panel.grid.minor.x = element_blank() ) +
  theme(axis.text.y = element_text(size = 6))

leaveoneout_S

dat$Study_ID <- as.integer(dat$Study_ID)

Interaction of stress and EE

Intercept

Enriched and stress animals are better at learning and memory.

mod_ES0 <- rma.mv(yi = lnRR_ESa, V = VCV_ES, random = list(~1|Study_ID,
                                                             ~1|ES_ID,
                                                             ~1|Strain),
                  test = "t", 
                  data = dat)

summary(mod_ES0) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -40.8178   81.6355   89.6355   99.6790   90.1006   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0316  0.1777     30     no  Study_ID 
## sigma^2.2  0.0229  0.1513     92     no     ES_ID 
## sigma^2.3  0.0030  0.0544      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 303.2179, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.1229  0.0596  2.0605  91  0.0422  0.0044  0.2414  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_ES0) 
##    I2_total I2_Study_ID    I2_ES_ID   I2_Strain 
##  0.81703063  0.44913873  0.32576306  0.04212884
orchard_plot(mod_ES0, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 

Intercept with outlier removed

dat_outliers <- dat %>%
  filter(ES_ID != 88)

VCV_ES_outliers <- impute_covariance_matrix(vi = dat_outliers$lnRRV_E, cluster = dat_outliers$Study_ID, r = 0.5)

mod_ESoutlier <- rma.mv(yi = lnRR_ESa, V = VCV_ES_outliers, random = list(~1|Study_ID,
                                                             ~1|ES_ID,
                                                             ~1|Strain),
                  test = "t", 
                  data = dat_outliers)

summary(mod_ESoutlier)
## 
## Multivariate Meta-Analysis Model (k = 91; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -40.1745   80.3490   88.3490   98.3483   88.8196   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0351  0.1873     30     no  Study_ID 
## sigma^2.2  0.0703  0.2652     91     no     ES_ID 
## sigma^2.3  0.0218  0.1476      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 90) = 1151.6019, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.2377  0.0990  2.4013  90  0.0184  0.0410  0.4343  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
orchard_plot(mod_ESoutlier, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13))   

Meta-regression

Uni-moderator meta-regression

Type of assay

The type of learning/memory response

VCV_ES1 <- impute_covariance_matrix(vi = dat1$lnRRV_ES, cluster = dat$Study_ID, r = 0.5)

mod_ES1 <- rma.mv(yi = lnRR_ESa, V = VCV_ES1, mod = ~Type_assay-1, random = list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                  test = "t",
                  data = dat1)

summary(mod_ES1)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -39.0874   78.1748   90.1748  105.1066   91.1992   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0370  0.1924     30     no  Study_ID 
## sigma^2.2  0.0192  0.1386     92     no     ES_ID 
## sigma^2.3  0.0018  0.0422      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 293.9385, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 3.1062, p-val = 0.0305
## 
## Model Results:
## 
##                         estimate      se     tval  df    pval    ci.lb   ci.ub 
## Type_assayConditioning    0.1525  0.0589   2.5877  89  0.0113   0.0354  0.2696 
## Type_assayHabituation     0.1990  0.1415   1.4070  89  0.1629  -0.0820  0.4801 
## Type_assayRecognition    -0.0048  0.0800  -0.0606  89  0.9518  -0.1637  0.1541 
##  
## Type_assayConditioning  * 
## Type_assayHabituation 
## Type_assayRecognition 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES1) 
##   R2_marginal R2_coditional 
##    0.05775809    0.97105550
Learning_ES <- orchard_plot(mod_ES1, mod = "Type_assay", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Learning_ES

Learning vs Memory

Is the assay broadly measuring learning or memory?

mod_ES2 <-  rma.mv(yi = lnRR_ESa, V = VCV_ES, mod = ~Learning_vs_memory-1, random = list(~1|Study_ID, 
                                                                                           ~1|ES_ID,
                                                                                           ~1|Strain),
                   test = "t",
                   data = dat)

summary(mod_ES2) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -40.4769   80.9539   90.9539  103.4529   91.6682   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0292  0.1708     30     no  Study_ID 
## sigma^2.2  0.0232  0.1524     92     no     ES_ID 
## sigma^2.3  0.0034  0.0582      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 299.1854, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 2.9219, p-val = 0.0590
## 
## Model Results:
## 
##                             estimate      se    tval  df    pval    ci.lb 
## Learning_vs_memoryLearning    0.1744  0.0722  2.4166  90  0.0177   0.0310 
## Learning_vs_memoryMemory      0.1057  0.0619  1.7065  90  0.0914  -0.0174 
##                              ci.ub 
## Learning_vs_memoryLearning  0.3178  * 
## Learning_vs_memoryMemory    0.2288  . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES2) 
##   R2_marginal R2_coditional 
##     0.0197648     0.9405080
LvsM_ES <- orchard_plot(mod_ES2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

LvsM_ES 

Type of reinforcement

The type of conditioning used

VCV_ES2 <- impute_covariance_matrix(vi = dat2$lnRRV_ES, cluster = dat2$Study_ID, r = 0.5)

mod_ES3 <- rma.mv(yi = lnRR_ESa, V = VCV_ES2, mod = ~ Type_reinforcement-1, random = list(~1|Study_ID,
                                                                                               ~1|ES_ID,
                                                                                               ~1|Strain),
                  test = "t",
                  data = dat2)

summary(mod_ES3)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -39.0604   78.1208   90.1208  105.0526   91.1452   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0382  0.1954     30     no  Study_ID 
## sigma^2.2  0.0189  0.1377     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 293.4724, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 3.2547, p-val = 0.0254
## 
## Model Results:
## 
##                                   estimate      se    tval  df    pval    ci.lb 
## Type_reinforcementAppetitive        0.1007  0.1075  0.9366  89  0.3515  -0.1129 
## Type_reinforcementAversive          0.1573  0.0569  2.7618  89  0.0070   0.0441 
## Type_reinforcementNot applicable    0.0147  0.0702  0.2101  89  0.8341  -0.1247 
##                                    ci.ub 
## Type_reinforcementAppetitive      0.3143     
## Type_reinforcementAversive        0.2704  ** 
## Type_reinforcementNot applicable  0.1541     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES3) 
##   R2_marginal R2_coditional 
##     0.0586952     1.0000000
Reinforcement_ES <- orchard_plot(mod_ES3, mod = "Type_reinforcement", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Reinforcement_ES 

Type of stress

The type of stress manipulation

VCV_ES3 <- impute_covariance_matrix(vi = dat3$lnRRV_ES, cluster = dat3$Study_ID, r = 0.5)
mod_ES4 <- rma.mv(yi = lnRR_ESa, V = VCV_ES3, mod = ~Type_stress_exposure-1, random = list(~1|Study_ID,
                                                                                            ~1|ES_ID,
                                                                                            ~1|Strain),
                  test = "t",
                  data = dat3)
summary(mod_ES4)
## 
## Multivariate Meta-Analysis Model (k = 85; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -34.4046   68.8091   82.8091   99.5703   84.3434   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0426  0.2064     25     no  Study_ID 
## sigma^2.2  0.0232  0.1524     85     no     ES_ID 
## sigma^2.3  0.0104  0.1021      4     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 81) = 281.9708, p-val < .0001
## 
## Test of Moderators (coefficients 1:4):
## F(df1 = 4, df2 = 81) = 0.5137, p-val = 0.7258
## 
## Model Results:
## 
##                                  estimate      se    tval  df    pval    ci.lb 
## Type_stress_exposureCombination    0.1111  0.1458  0.7618  81  0.4484  -0.1790 
## Type_stress_exposureMS             0.1185  0.1099  1.0784  81  0.2840  -0.1001 
## Type_stress_exposureNoise          0.1651  0.1795  0.9198  81  0.3604  -0.1920 
## Type_stress_exposureRestraint      0.1374  0.1252  1.0978  81  0.2755  -0.1116 
##                                   ci.ub 
## Type_stress_exposureCombination  0.4011    
## Type_stress_exposureMS           0.3370    
## Type_stress_exposureNoise        0.5221    
## Type_stress_exposureRestraint    0.3865    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES4)
##   R2_marginal R2_coditional 
##   0.004455703   0.863910284
Stressor_ES <- orchard_plot(mod_ES4, mod = "Type_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7))  

Stressor_ES 

Age of stress

The age at which the individuals were exposed to the stressor.

mod_ES5 <-rma.mv(yi = lnRR_ESa, V = VCV_ES, mod = ~Age_stress_exposure-1, random = list(~1|Study_ID,
                                                                                          ~1|ES_ID,
                                                                                          ~1|Strain),
                 test = "t",
                 data = dat)
summary(mod_ES5) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -39.0917   78.1834   92.1834  109.5247   93.5834   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0295  0.1718     30     no  Study_ID 
## sigma^2.2  0.0232  0.1522     92     no     ES_ID 
## sigma^2.3  0.0091  0.0954      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 88) = 286.4225, p-val < .0001
## 
## Test of Moderators (coefficients 1:4):
## F(df1 = 4, df2 = 88) = 1.5932, p-val = 0.1832
## 
## Model Results:
## 
##                                     estimate      se     tval  df    pval 
## Age_stress_exposureAdolescent        -0.0137  0.1762  -0.0775  88  0.9384 
## Age_stress_exposureAdult              0.1677  0.1140   1.4708  88  0.1449 
## Age_stress_exposureEarly postnatal    0.1067  0.0920   1.1602  88  0.2491 
## Age_stress_exposurePrenatal           0.3179  0.1311   2.4254  88  0.0173 
##                                       ci.lb   ci.ub 
## Age_stress_exposureAdolescent       -0.3639  0.3366    
## Age_stress_exposureAdult            -0.0589  0.3942    
## Age_stress_exposureEarly postnatal  -0.0761  0.2896    
## Age_stress_exposurePrenatal          0.0574  0.5784  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES5) 
##   R2_marginal R2_coditional 
##    0.09276574    0.86630830
Age_stress_ES<-orchard_plot(mod_ES5, mod = "Age_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Age_stress_ES

Stress duration

How long was the stress applied for (chronic = every day for 7 days or more)? This has the highest marginal R2 (currentl nearly 43%) - need to redo without outlier

VCV_ES4 <- impute_covariance_matrix(vi = dat4$lnRRV_ES, cluster = dat4$Study_ID, r = 0.5)

mod_ES6 <-rma.mv(yi = lnRR_ESa, V = VCV_ES4, mod = ~Stress_duration-1, random = list(~1|Study_ID,
                                                                                      ~1|ES_ID,
                                                                                      ~1|Strain),
                 test = "t",
                 data = dat4)
summary(mod_ES6) 
## 
## Multivariate Meta-Analysis Model (k = 89; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -35.9010   71.8020   81.8020   94.1315   82.5427   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0133  0.1155     29     no  Study_ID 
## sigma^2.2  0.0260  0.1611     89     no     ES_ID 
## sigma^2.3  0.0080  0.0895      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 87) = 278.4877, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 87) = 4.3877, p-val = 0.0153
## 
## Model Results:
## 
##                         estimate      se     tval  df    pval    ci.lb   ci.ub 
## Stress_durationAcute     -0.0367  0.0930  -0.3946  87  0.6941  -0.2216  0.1482 
## Stress_durationChronic    0.1854  0.0732   2.5347  87  0.0130   0.0400  0.3308 
##  
## Stress_durationAcute 
## Stress_durationChronic  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES6) 
##   R2_marginal R2_coditional 
##     0.1598311     0.8575918
Duration_ES<- orchard_plot(mod_ES6, mod = "Stress_duration", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Duration_ES

Social enrichment

Does EE also include a manipulation of social environment (i.e., number of individuals in EE relative to control)?

VCV_ES5 <- impute_covariance_matrix(vi = dat5$lnRRV_ES, cluster = dat5$Study_ID, r = 0.5)
mod_ES7<- rma.mv(yi = lnRR_ESa, V = VCV_ES5, mod = ~EE_social-1, random = list(~1|Study_ID,
                                                                                ~1|ES_ID,
                                                                                ~1|Strain),
                 test = "t",
                 data = dat5)

summary(mod_ES7)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -40.1871   80.3742   90.3742  102.8733   91.0885   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0311  0.1762     30     no  Study_ID 
## sigma^2.2  0.0230  0.1518     92     no     ES_ID 
## sigma^2.3  0.0046  0.0676      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 302.0860, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 2.4198, p-val = 0.0947
## 
## Model Results:
## 
##                      estimate      se    tval  df    pval    ci.lb   ci.ub 
## EE_socialNon-social    0.0750  0.0807  0.9300  90  0.3549  -0.0853  0.2353    
## EE_socialSocial        0.1720  0.0788  2.1842  90  0.0315   0.0156  0.3285  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES7) 
##   R2_marginal R2_coditional 
##    0.03751906    0.92501263
Social_ES <- orchard_plot(mod_ES7, mod = "EE_social", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Social_ES

Exercise enrichment

Does the form of enrichment include exercise through a running wheel or treadmill?

mod_ES8<- rma.mv(yi = lnRR_ESa, V = VCV_ES, mod = ~EE_exercise-1, random = list(~1|Study_ID, 
    ~1|ES_ID,
    ~1|Strain),
     test = "t",
     data = dat)

summary(mod_ES8)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -40.4968   80.9935   90.9935  103.4926   91.7078   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0325  0.1803     30     no  Study_ID 
## sigma^2.2  0.0230  0.1517     92     no     ES_ID 
## sigma^2.3  0.0065  0.0805      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 297.3270, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 1.8401, p-val = 0.1647
## 
## Model Results:
## 
##                         estimate      se    tval  df    pval    ci.lb   ci.ub 
## EE_exerciseExercise       0.1051  0.0780  1.3474  90  0.1812  -0.0499  0.2602 
## EE_exerciseNo exercise    0.1687  0.0967  1.7448  90  0.0844  -0.0234  0.3609 
##  
## EE_exerciseExercise 
## EE_exerciseNo exercise  . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES8) 
##   R2_marginal R2_coditional 
##    0.01474459    0.89712469
Exercise_ES <- orchard_plot(mod_ES8, mod = "EE_exercise", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Exercise_ES

Order to treatment exposure

What order were animals exposed to stress and EE

mod_ES9 <- rma.mv(yi = lnRR_ESa, V = VCV_ES, mod = ~Exposure_order -1, random = list(~1|Study_ID, 
    ~1|ES_ID,
    ~1|Strain),
     test = "t",
     data = dat)

summary(mod_ES9)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -39.0408   78.0817   90.0817  105.0135   91.1061   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0316  0.1777     30     no  Study_ID 
## sigma^2.2  0.0227  0.1507     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 292.2561, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 3.1546, p-val = 0.0287
## 
## Model Results:
## 
##                                 estimate      se     tval  df    pval    ci.lb 
## Exposure_orderConcurrently        0.1492  0.1208   1.2351  89  0.2201  -0.0909 
## Exposure_orderEnrichment first   -0.1782  0.1659  -1.0744  89  0.2856  -0.5079 
## Exposure_orderStress first        0.1370  0.0526   2.6046  89  0.0108   0.0325 
##                                  ci.ub 
## Exposure_orderConcurrently      0.3893    
## Exposure_orderEnrichment first  0.1514    
## Exposure_orderStress first      0.2414  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES9)
##   R2_marginal R2_coditional 
##     0.1027207     1.0000000
Order_ES <- orchard_plot(mod_ES9, mod = "Exposure_order", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Order_ES 

Age of enrichment

What age were individuals exposed to EE

VCV_ES6 <- impute_covariance_matrix(vi = dat6$lnRRV_ES, cluster = dat6$Study_ID, r = 0.5)

mod_ES10 <- rma.mv(yi = lnRR_ESa, V = VCV_ES6, mod = ~Age_EE_exposure-1, random = list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                 test = "t",
                 data = dat6)

summary(mod_ES10) 
## 
## Multivariate Meta-Analysis Model (k = 88; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -36.6407   73.2813   83.2813   95.5531   84.0313   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0332  0.1822     29     no  Study_ID 
## sigma^2.2  0.0207  0.1439     88     no     ES_ID 
## sigma^2.3  0.0007  0.0265      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 86) = 288.6493, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 86) = 3.1308, p-val = 0.0487
## 
## Model Results:
## 
##                            estimate      se    tval  df    pval    ci.lb 
## Age_EE_exposureAdolescent    0.1284  0.0583  2.2006  86  0.0304   0.0124 
## Age_EE_exposureAdult         0.1247  0.0921  1.3538  86  0.1793  -0.0584 
##                             ci.ub 
## Age_EE_exposureAdolescent  0.2444  * 
## Age_EE_exposureAdult       0.3077    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_ES10) 
##   R2_marginal R2_coditional 
##  0.0000400928  0.9871095822
Age_enrichment_ES <- orchard_plot(mod_ES10, mod = "Age_EE_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Age_enrichment_ES

Meta-regression: multi-moderator model

dat_ESfm <- dat %>%
  filter(Type_assay %in% c("Recognition", "Habituation", "Conditioning"),
         Type_reinforcement %in% c("Appetitive", "Aversive", "Not applicable"),
         EE_social %in% c("Social", "Non-social"),
         Age_EE_exposure %in% c("Adult", "Adolescent"),
         Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"),
         Stress_duration %in% c("Chronic", "Acute"), 
         Age_stress_exposure %in% c("Prenatal", "Early postnatal", "Adult"))

VCV_ESfm <- impute_covariance_matrix(vi = dat_ESfm$lnRRV_ES, cluster = dat_ESfm$Study_ID, r = 0.5)
                 
mod_ESfm <- rma.mv(yi = lnRR_Sa, V = VCV_ESfm, mod = ~Type_assay-1 + Learning_vs_memory + Type_reinforcement + EE_social + EE_exercise + Age_EE_exposure + Type_stress_exposure + Age_stress_exposure + Stress_duration + Exposure_order, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_ESfm)
#summary(mod_ESfm)
#r2_ml(mod_ESfm) 


res_ESfm <- dredge(mod_ESfm, trace=2)
saveRDS(res_ESfm, file = here("Rdata", "res_ESfm.rds"))
# also saving the full model and data
saveRDS(mod_ESfm, file = here("Rdata", "mod_ESfm.rds"))
saveRDS(dat_ESfm, file = here("Rdata", "dat_ESfm.rds"))
dat_ESfm <- readRDS(file = here("Rdata", "dat_ESfm.rds"))
mod_ESfm <- readRDS(file = here("Rdata", "mod_ESfm.rds"))
res_ESfm <- readRDS(file = here("Rdata", "res_ESfm.rds"))
res_ESfm2<- subset(res_ESfm, delta <= 6, recalc.weights=FALSE)
importance(res_ESfm2) 
##                      Type_assay Stress_duration Age_EE_exposure EE_exercise
## Sum of weights:      0.65       0.63            0.26            0.11       
## N containing models:   19         18               7               6       
##                      Learning_vs_memory EE_social Age_stress_exposure
## Sum of weights:      0.08               0.08      0.05               
## N containing models:    4                  4         2               
##                      Type_stress_exposure Exposure_order
## Sum of weights:      0.03                 0.02          
## N containing models:    2                    1

Publication bias & sensitivity analysis

Publication bias

Funnel_ES<-funnel(mod_ESfm, xlab = "lnRR", ylab = "Standard Error")

Funnel_ES
##               x          y slab
## 1  -0.160575495 0.55343043    1
## 2  -0.274967420 0.70627539    2
## 3  -0.089909776 0.46527295    3
## 4  -0.418899318 0.36571674    4
## 5  -0.402782263 0.53501323    5
## 6  -0.058704027 0.45431531    6
## 7   0.098710844 0.15778981    7
## 8  -0.041805142 0.11742065    8
## 9  -0.234568128 0.44509952    9
## 10  0.473861619 0.42494140   10
## 11 -0.541677850 0.59839672   11
## 12  0.693576469 0.55379623   12
## 13 -0.506062429 0.32349206   13
## 14 -0.434662931 0.29560170   14
## 15 -0.752838102 0.41427250   15
## 16 -0.403661524 0.38640076   16
## 17 -0.074888497 0.14501213   17
## 18 -0.016611205 0.15220818   18
## 19  0.208016615 0.31484827   19
## 20  0.066418083 0.06641794   20
## 21 -0.016988105 0.13630983   21
## 22  0.102490150 0.27788492   22
## 23  0.077633012 0.12209543   23
## 24  0.265433711 0.18060215   24
## 25  0.549569896 0.20386929   25
## 26  0.029904759 0.23531678   26
## 27  0.036477985 0.10420416   27
## 28  0.033071595 0.10236053   28
## 29  0.154387233 0.13226659   29
## 30  0.068347398 0.10358055   30
## 31 -0.066651554 0.06766794   31
## 32  0.180810374 0.14954583   32
## 33 -0.116381761 0.30939379   33
## 34  0.008432825 0.12562534   34
## 35  0.040870354 0.17488050   35
## 36  0.039701546 0.09315773   36
## 37  0.119572535 0.09839159   37
## 38  0.083631129 0.10349806   38
## 39  0.151879757 0.14903289   39
## 40  0.120985979 0.10446433   40
## 41  0.031045847 0.10043162   41
## 42  0.118346228 0.11005125   42
## 43  0.155471736 0.12842298   43
## 44 -0.100968105 0.08874519   44
## 45  0.044022534 0.07229828   45
## 46  0.101114679 0.16689703   46
## 47  0.203773608 0.15471161   47
## 48  0.088563198 0.17334032   48
## 49 -0.005169248 0.17866865   49
## 50  0.434942304 0.36859082   50
## 51  0.554127069 0.73098123   51
## 52 -0.616979476 0.68762297   52
## 53 -0.364466901 0.25174124   53
## 54 -0.304033204 0.52455475   54
## 55  0.230466950 0.44507651   55
## 56 -0.314501446 0.47347958   56
## 57 -0.044976662 0.10592411   57
## 58  0.088941840 0.09196930   58
## 59 -0.100897815 0.73013998   59
## 60  0.510285555 0.59479999   60
## 61  0.451655914 0.34843255   61
## 62  0.479984384 0.70816567   62
## 63 -0.204829048 0.32322526   63
## 64 -0.079911943 0.14926781   64
## 65  0.065631803 0.33253908   65
## 66  0.186788247 0.09926520   66
## 67 -0.216336366 0.15728452   67
## 68 -0.069183632 0.12020637   68
## 69 -0.074226124 0.44173325   69
## 70  0.038912354 0.12603637   70
## 71  0.441601579 1.08275832   71
## 72 -0.149920957 0.21396887   72
## 73  0.302424799 1.01869334   73
## 74  0.205736931 0.09751054   74
#year published was scaled previously under stress PB

dat_ESfm$sqrt_inv_e_n <- with(dat_ESfm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

PB_MR_ES<- rma.mv(lnRR_ESa, lnRRV_ES, mods = ~1 + sqrt_inv_e_n +  Year_published + Learning_vs_memory + Type_assay + Type_reinforcement + EE_social + EE_exercise + Age_stress_exposure, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain), method = "REML", test = "t", 
    data = dat_ESfm)

estimates_PB_MR_ES<- estimates.CI(PB_MR_ES)
estimates_PB_MR_ES
##                              estimate         mean        lower      upper
## 1                             intrcpt 29.134353111 -30.75347607 89.0221823
## 2                        sqrt_inv_e_n -0.673866709  -1.71164812  0.3639147
## 3                      Year_published -0.014252207  -0.04395022  0.0154458
## 4            Learning_vs_memoryMemory  0.012836924  -0.11463581  0.1403097
## 5               Type_assayHabituation  0.104424947  -0.43068420  0.6395341
## 6               Type_assayRecognition -0.030511298  -0.47521428  0.4141917
## 7          Type_reinforcementAversive  0.006308661  -0.37520415  0.3878215
## 8    Type_reinforcementNot applicable -0.188090210  -0.76103491  0.3848545
## 9                     EE_socialSocial  0.153788955  -0.17496170  0.4825396
## 10             EE_exerciseNo exercise  0.101557466  -0.21974101  0.4228559
## 11 Age_stress_exposureEarly postnatal  0.104819490  -0.21762872  0.4272677
## 12        Age_stress_exposurePrenatal  0.127578525  -0.32580147  0.5809585
#no effect of inv_effective sample size or year published

Leave-one-out analysis

dat$Study_ID <- as.factor(dat$Study_ID)

LeaveOneOut_effectsize <- list()
for(i in 1:length(levels(dat$Study_ID))){
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Ea, V = lnRRV_E, 
                                        random = list(~1 | Study_ID,~1| ES_ID, ~1 | Strain), 
                                        method = "REML", data = dat[dat$Study_ID
                                                                    != levels(dat$Study_ID)[i], ])}


# writing function for extracting est, ci.lb, and ci.ub from all models
est.func <- function(mod_E0){
  df <- data.frame(est = mod_E0$b, lower = mod_E0$ci.lb, upper = mod_E0$ci.ub)
  return(df)
}


#using dplyr to form data frame
MA_CVR_ES <- lapply(LeaveOneOut_effectsize, function(x) est.func(x))%>% bind_rows %>% mutate(left_out = levels(dat$Study_ID))

saveRDS(MA_CVR_ES, ,file = here("Rdata", "MA_CVR_ES.rds"))
MA_CVR_ES<- readRDS(here("Rdata", "MA_CVR_ES.rds"))

#telling ggplot to stop reordering factors
MA_CVR_ES$left_out<- as.factor(MA_CVR_ES$left_out)
MA_CVR_ES$left_out<-factor(MA_CVR_ES$left_out, levels = MA_CVR_ES$left_out)

#plotting
leaveoneout_ES <- ggplot(MA_CVR_ES) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_E0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") + 
  ylab("lnRR, 95% CI") + 
  coord_flip() +
  theme(panel.grid.minor = element_blank())+
  theme_bw() + theme(panel.grid.major = element_blank()) +
  theme(panel.grid.minor.x = element_blank() ) +
  theme(axis.text.y = element_text(size = 6))

leaveoneout_ES

dat$Study_ID <- as.integer(dat$Study_ID)

Combined orchard plot

mod_list1 <- list(mod_E0, mod_S0, mod_ES0)

mod_res1 <- lapply(mod_list1, function(x) mod_results(x, mod = "Int"))

merged1 <- submerge(mod_res1[[3]], mod_res1[[2]],  mod_res1[[1]], mix = T)
merged1$mod_table$name <- factor(merged1$mod_table$name, levels = c("Intrcpt1", 
    "Intrcpt2", "Intrcpt3"), 
    labels = rev(c("Enrichment ME", "Stress ME", "Interaction")))

merged1$data$moderator <- factor(merged1$data$moderator, levels = c("Intrcpt1", 
    "Intrcpt2", "Intrcpt3"), 
    labels = rev(c("Enrichment ME", "Stress ME", "Interaction")))

orchard1<- orchard_plot(merged1, mod = "Int", xlab = "lnRR", angle = 0) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals 
  xlim(-2,4.5) +
  geom_point(aes(fill = name),  size = 4, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling +
  scale_colour_manual(values = c("#00AEEF","#00A651","#ED1C24"))+ # change colours
  scale_fill_manual(values=c("#00AEEF","#00A651","#ED1C24"))+
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

orchard1

‘Pairwise’ effect sizes

Enrichment relative to control

VCV_E20 <- impute_covariance_matrix(vi = dat$lnRRV_E2, cluster = dat$Study_ID, r = 0.5)

#Model doesn't converge with VCV
mod_E20 <- rma.mv(yi = lnRR_E2a, V = VCV_E20, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                 test = "t", 
                 data = dat, 
                 control=list(optimizer="optim", optmethod="Nelder-Mead"))

summary(mod_E20) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -7.3235   14.6470   22.6470   32.6904   23.1121   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0037  0.0611     30     no  Study_ID 
## sigma^2.2  0.0000  0.0000      6     no    Strain 
## sigma^2.3  0.0281  0.1675     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 475.8327, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.1066  0.0291  3.6655  91  0.0004  0.0489  0.1644  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_E20) 
##     I2_total  I2_Study_ID    I2_Strain     I2_ES_ID 
## 8.610789e-01 1.011724e-01 2.607945e-09 7.599065e-01
orchard_plot(mod_E20, mod = "Int", xlab = "lnRR")

Stress relative to control

VCV_S20 <- impute_covariance_matrix(vi = dat$lnRRV_S2, cluster = dat$Study_ID, r = 0.5)

mod_S20 <- rma.mv(yi = lnRR_S2a, V = VCV_S20, random = list(~1|Study_ID, 
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                 test = "t",
                 data = dat)

summary(mod_S20) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -52.3561  104.7122  112.7122  122.7557  113.1773   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0323  0.1797     30     no  Study_ID 
## sigma^2.2  0.0000  0.0000      6     no    Strain 
## sigma^2.3  0.0798  0.2824     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 1003.0694, p-val < .0001
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb    ci.ub 
##  -0.1846  0.0522  -3.5360  91  0.0006  -0.2882  -0.0809  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_S20) 
##     I2_total  I2_Study_ID    I2_Strain     I2_ES_ID 
## 9.489604e-01 2.734220e-01 9.879730e-10 6.755384e-01
orchard_plot(mod_S20, mod = "Int", xlab = "lnRR")

Enrichment + stress relative to control

VCV_ES20 <- impute_covariance_matrix(vi = dat$lnRRV_ES2, cluster = dat$Study_ID, r = 0.5)

mod_ES20 <- rma.mv(yi = lnRR_ES2a, V = VCV_ES20, random = list(~1|Study_ID,
                                                                ~ 1|Strain,
                                                                ~1|ES_ID),
                 test = "t",
                 data = dat)
summary(mod_ES20) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -10.3625   20.7250   28.7250   38.7684   29.1901   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0039  0.0625     30     no  Study_ID 
## sigma^2.2  0.0014  0.0377      6     no    Strain 
## sigma^2.3  0.0227  0.1508     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 402.2656, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.0801  0.0389  2.0594  91  0.0423  0.0028  0.1573  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_ES20) 
##    I2_total I2_Study_ID   I2_Strain    I2_ES_ID 
##  0.81513970  0.11355557  0.04132363  0.66026050
orchard_plot(mod_ES20, mod = "Int", xlab = "lnRR")

Enrichment + stress relative to stress

VCV_E30 <- impute_covariance_matrix(vi = dat$lnRRV_E3, cluster = dat$Study_ID, r = 0.5)

mod_E30 <- rma.mv(yi = lnRR_E3a, V = VCV_E30, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                  test = "t",
                  data = dat)
summary(mod_E30) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -46.3447   92.6895  100.6895  110.7329  101.1546   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0235  0.1532     30     no  Study_ID 
## sigma^2.2  0.0263  0.1623      6     no    Strain 
## sigma^2.3  0.0543  0.2331     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 790.0249, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.2465  0.1011  2.4389  91  0.0167  0.0457  0.4472  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_E30) 
##    I2_total I2_Study_ID   I2_Strain    I2_ES_ID 
##   0.9456920   0.2132063   0.2391809   0.4933048
orchard_plot(mod_E30, mod = "Int", xlab = "lnRR")

Enrichment + stress relative to enrichment

VCV_S30 <- impute_covariance_matrix(vi = dat$lnRRV_S3, cluster = dat$Study_ID, r = 0.5)

mod_S30 <- rma.mv(yi = lnRR_S3a, V = VCV_S30, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                  test = "t",
                  data = dat,
                   control=list(optimizer="optim", optmethod="Nelder-Mead"))
summary(mod_S30) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -6.8788   13.7576   21.7576   31.8011   22.2228   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0000  0.0000     30     no  Study_ID 
## sigma^2.2  0.0009  0.0292      6     no    Strain 
## sigma^2.3  0.0276  0.1662     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 540.3522, p-val < .0001
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb   ci.ub 
##  -0.0087  0.0342  -0.2552  91  0.7992  -0.0766  0.0592    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_S30) 
##     I2_total  I2_Study_ID    I2_Strain     I2_ES_ID 
## 8.415428e-01 5.192043e-09 2.515933e-02 8.163835e-01
orchard_plot(mod_S30, mod = "Int", xlab = "lnRR")

Combined orchard plot

mod_list2 <- list(mod_S30, mod_E30, mod_ES20, mod_S20, mod_E20) #rearranged the order so that it matches intext results

mod_res2 <- lapply(mod_list2, function(x) mod_results(x, mod = "Int"))

merged2 <- submerge(mod_res2[[1]], mod_res2[[2]],  mod_res2[[3]], mod_res2[[4]],  mod_res2[[5]], mix = T)

merged2$mod_table$name <- factor(merged2$mod_table$name, levels = c("Intrcpt1", 
    "Intrcpt2", "Intrcpt3", "Intrcpt4", "Intrcpt5"), 
    labels = rev(c("EC/CC", "CS/CC", "ES/CC", "ES/CS", "ES/EC")))

merged2$data$moderator <- factor(merged2$data$moderator, levels = c("Intrcpt1", 
    "Intrcpt2", "Intrcpt3", "Intrcpt4", "Intrcpt5"), 
    labels = rev(c("EC/CC", "CS/CC", "ES/CC", "ES/CS", "ES/EC")))

orchard2 <- orchard_plot(merged2, mod = "Int", xlab = "lnRR", angle = 0) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals 
  xlim(-2,4.5) +
  geom_point(aes(fill = name),  size = 4, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  scale_colour_manual(values = c("#7B81BE","#D7DF23","#F37158","#75CBF2","#97D2B4"))+ # change colours
  scale_fill_manual(values=c("#7B81BE","#D7DF23","#F37158","#75CBF2","#97D2B4"))+
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

orchard2

Panel of ‘focal’ ES and ‘pairwise’ ES orchard plots

p1 <- orchard1 + orchard2 +  plot_annotation(tag_levels = 'A')
p1

#saved as PDF: 6 x 15 inches

Panel of meta-regressions

Environmental enrichment

#Enrichment
E_mod <- (LvsM_E + Learning_E + Reinforcement_E)/ (Age_E + Exercise_E + Social_E) +  plot_annotation(tag_levels = 'A')

E_mod

#saved as pdf 10 x 15 inches

Stress

S_mod <- (LvsM_S + Learning_S + Reinforcement_S) / (Age_S + Stressor + Duration_S) + plot_annotation(tag_levels = 'A')

S_mod

#saved as pdf 10 x 15 inches

Interaction

ES_mod <- plot_grid(LvsM_ES, Learning_ES, Reinforcement_ES, Age_enrichment_ES, Age_stress_ES, Order_ES, Exercise_ES, Social_ES, Stressor_ES, Duration_ES,
  labels = "AUTO", ncol = 5)

ES_mod

#saved as 10 x 20 inches

Panel of funnel plots

## quartz_off_screen 
##                 2

Modelling with SMD

Environmental Enrichment

Intercept model

# TODO 
#Why does the funnel plot look so strange and why is I2 higher?

mod_E0a <- rma.mv(yi = SMD_Ea, V = VCV_Ea, random = list(~1|Study_ID,
                                                         ~1|ES_ID, 
                                                         ~1|Strain),
                 test = "t",
                 data = dat)
summary(mod_E0a)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##    logLik   Deviance        AIC        BIC       AICc 
## -112.9351   225.8701   233.8701   243.9136   234.3353   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0000  0.0000     30     no  Study_ID 
## sigma^2.2  0.0000  0.0000     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 17.8254, p-val = 1.0000
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb   ci.ub 
##  -0.0249  0.0995  -0.2504  91  0.8028  -0.2226  0.1728    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_E0a)
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 8.598505e-10 8.994874e-14 0.000000e+00 8.597605e-10
funnel(mod_E0a, yaxis="seinv")

# VCV matrix
VCV_Efm <- impute_covariance_matrix(vi = dat_Efm$lnRRV_E, cluster = dat_Efm$Study_ID, r = 0.5)

mod_E0b <- rma.mv(yi = SMD_Ea, V = VCV_Efm, mod = ~Type_assay-1 + Learning_vs_memory-1 + Type_reinforcement-1 + EE_social-1 + EE_exercise-1 + Age_EE_exposure-1, random = list(~1|Study_ID,~1|ES_ID,~1|Strain),
                 test = "t",
                 data = dat_Efm)

funnel(mod_E0b, yaxis="seinv")

Stress

Intercept model

mod_S0a <- rma.mv(yi = SMD_Sa, V = VCV_Sa, random = list(~1|Study_ID,
                                                         ~1|ES_ID,
                                                         ~1|Strain),
                test = "t",
                data = dat)
summary(mod_S0a) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##    logLik   Deviance        AIC        BIC       AICc 
## -134.4163   268.8326   276.8326   286.8760   277.2977   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0000  0.0001     30     no  Study_ID 
## sigma^2.2  0.0809  0.2845     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 64.9393, p-val = 0.9823
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb   ci.ub 
##  -0.0959  0.1193  -0.8040  91  0.4235  -0.3328  0.1410    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_S0a) 
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 9.843664e-02 6.086831e-09 9.843664e-02 4.598665e-12
funnel(mod_S0a, yaxis="seinv")

# VCV matrix
VCV_Sfm <- impute_covariance_matrix(vi = dat_Sfm$lnRRV_E, cluster = dat_Sfm$Study_ID, r = 0.5)

#assessing funnel on model model
mod_S0b <- rma.mv(yi = SMD_Sa, V = VCV_Sfm, mod = ~ Type_assay -1 + Learning_vs_memory + Type_reinforcement + Type_stress_exposure + Age_stress_exposure + Stress_duration, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_Sfm)
funnel(mod_S0b, yaxis="seinv")

Interaction

Intercept model

mod_ES0a <- rma.mv(yi = SMD_ESa, V = VCV_ESa, random = list(~1|Study_ID,
                                                            ~1|ES_ID,
                                                            ~1|Strain),
                  test = "t",
                  data = dat)
summary(mod_ES0a)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##    logLik   Deviance        AIC        BIC       AICc 
## -127.2849   254.5699   262.5699   272.6133   263.0350   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.5212  0.7219     30     no  Study_ID 
## sigma^2.2  0.3989  0.6316     92     no     ES_ID 
## sigma^2.3  0.0000  0.0002      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 283.7373, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.7126  0.1825  3.9042  91  0.0002  0.3500  1.0751  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_ES0a) 
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 6.899076e-01 3.908063e-01 2.991013e-01 1.910481e-08
funnel(mod_ES0a, yaxis="seinv")

# VCV matrix
VCV_ESfm <- impute_covariance_matrix(vi = dat_ESfm$lnRRV_E, cluster = dat_ESfm$Study_ID, r = 0.5)

mod_ES0b <- rma.mv(yi = SMD_Sa, V = VCV_ESfm, mod = ~Type_assay-1 + Learning_vs_memory + Type_reinforcement + EE_social + EE_exercise + Age_EE_exposure + Type_stress_exposure + Age_stress_exposure + Stress_duration + Exposure_order, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_ESfm)

funnel(mod_ES0b, yaxis="seinv")

---
title: "Stress x EE MA"
authors: "Erin L Macartney, Malgorzata Lagisz, Shinichi Nakagawa"
subtitle: Supplementary Material
output: 
    
    rmdformats::readthedown:
      code_folding: hide
      code_download: true
editor_options: 
  chunk_output_type: console
  # output:
#   html_document:
#     code_folding: hide
#     df_print: kable
#     toc: TRUE
#     toc_float: TRUE
#     toc_depth: 4
#     collapsed: FALSE
#     YAML: rmdformats::robobook
---

```{r, include = FALSE}
knitr::opts_chunk$set(
message = FALSE,
warning = FALSE,
cache = TRUE, 
tidy = TRUE, 
echo = TRUE
)

rm(list = ls())
```

# Setting-up

## Loading packages

```{r}
pacman::p_load(tidyverse, 
               here,
               metafor,
               clubSandwich,
               orchaRd, 
               MuMIn, 
               patchwork,
               GoodmanKruskal,
               networkD3,
               ggplot2,
               visdat,
               ggalluvial,
               ggthemr,
               cowplot)
# needed for model selection using MuMIn within metafor
eval(metafor:::.MuMIn)

```

## Loading data and functions

```{r}
dat <- read_csv(here("Data","Data_raw.csv"))
# Load custom function to extract data 
source(here("R/Functions.R")) 
```

# Data exploration

## General  
``` {r, message = FALSE, warning = FALSE, eval = FALSE}
#Number of effect sizes
length(unique(dat$ES_ID))  

#Number of studies
length(unique(dat$Study_ID))

#Publication years
min(dat$Year_published) 
max(dat$Year_published)
```

## Explore associations among predictor variables   

```{r, results = 'hide'}
plot_missing <- vis_miss(dat) +
  theme(plot.title = element_text(hjust = 0.5, vjust = 3), 
        plot.margin = margin(t = 0.5, r = 3, b = 1, l = 1, unit = "cm")) +
  ggtitle("Missing data in the selected predictors") #no missing values

plot_missing
#useGoodman and Kruskal’s τ measure of association between categorical predictor variables (function from package GoodmanKruskal: https://cran.r-project.org/web/packages/GoodmanKruskal/vignettes/GoodmanKruskal.html)
#GKmatrix <- GKtauDataframe(subset(dat, select = c("Sex", "Type_assay", "Learning_vs_memory", #"Type_reinforcement",  "Type_stress_exposure", "Age_stress_exposure", "Stress_duration", #"EE_social_HR", "EE_exercise", "Age_EE_exposure", "Exposure_order", "Age_assay")))
#plot(GKmatrix)

#simple pairwise contingency tables
# table(dat$Type_assay, dat$Type_reinforcement) 
# table(dat$Age_stress_exposure, dat$Age_EE_exposure) 
# table(dat$Type_stress_exposure, dat$Age_stress_exposure)
# table(dat$Type_stress_exposure, dat$Age_assay)
# table(dat$Type_stress_exposure, dat$Stress_duration)
```

## Alluvial diagrams 

```{r redo and assemble plots , results = 'hide'}

#A. subjects info: species-strain-sex

freq_A <- as.data.frame(table(dat$Sex, dat$Common_species, dat$Strain)) %>% rename(Sex = Var1, Species = Var2, Strain = Var3) #make a data frame of frequencies for three selected variables
is_alluvia_form(as.data.frame(freq_A), axes = 1:3, silent = TRUE)

p1 <- ggplot(data = freq_A,
  aes(axis1 = Sex, axis2 = Species, axis3 = Strain, y = Freq)) +
  geom_alluvium(aes(fill = Sex)) +
  geom_flow() +
  geom_stratum(aes(fill = Sex)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold"),
        plot.margin = unit(c(1, 1, 0, 1), "cm")) +
  scale_x_discrete(limits = c("Sex", "Species", "Strain"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("A  study subjects")

p1

#B. EE info: type-exercise-social EE

freq_B <- as.data.frame(table(dat$Type_EE_exposure, dat$EE_exercise, dat$EE_social)) %>% rename(Type_EE = Var1, EE_exercise = Var2, EE_social = Var3) #make a data frame of frequencies for three selected variables
is_alluvia_form(as.data.frame(freq_B), axes = 1:3, silent = TRUE)

p2 <- ggplot(data = freq_B,
  aes(axis1 = Type_EE, axis2 = EE_exercise, axis3 = EE_social, y = Freq)) +
  geom_alluvium(aes(fill = Type_EE)) +
  geom_flow() +
  geom_stratum(aes(fill = Type_EE)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold"),
        plot.margin = unit(c(1, 1, 0, 1), "cm")) +
  scale_x_discrete(limits = c("Type", "Exercise", "Social"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("C  environmental enrichment")

p2

#C. stress info: age-duration-type stress

freq_C <- as.data.frame(table(dat$Age_stress_exposure, dat$Stress_duration, dat$Type_stress_exposure)) %>% rename(Age_stress = Var1, Duration_stress = Var2, Type_stress = Var3) #make a data frame of frequencies for three selected variables
is_alluvia_form(as.data.frame(freq_C), axes = 1:3, silent = TRUE)

p3 <- ggplot(data = freq_C,
  aes(axis1 = Age_stress, axis2 = Duration_stress, axis3 = Type_stress, y = Freq)) +
  geom_alluvium(aes(fill = Age_stress)) +
  geom_flow() +
  geom_stratum(aes(fill = Age_stress)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold"),
        plot.margin = unit(c(1, 1, 0, 1), "cm")) +
  scale_x_discrete(limits = c("Age", "Duration", "Type"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("B  stress exposure")

p3

#D. assay info: L/M-type-reinforcement

freq_D <- as.data.frame(table(dat$Learning_vs_memory, dat$Type_assay, dat$Type_reinforcement)) %>% rename(Learning_Memory = Var1, Type = Var2, Reinforcement = Var3) #make a data frame of frequencies for three selected variables
is_alluvia_form(as.data.frame(freq_D), axes = 1:3, silent = TRUE)

p4 <- ggplot(data = freq_D,
  aes(axis1 = Learning_Memory, axis2 = Type, axis3 = Reinforcement, y = Freq)) +
  geom_alluvium(aes(fill = Learning_Memory)) +
  geom_flow() +
  geom_stratum(aes(fill = Learning_Memory)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold"),
        plot.margin = unit(c(1, 1, 0, 1), "cm")) +
  scale_x_discrete(limits = c("Learning_Memory", "Type", "Reinforcement"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("D  cognitive assay")

p4

#p1 + scale_fill_brewer(palette = "Set3") #Pastel1

(p1 + scale_fill_brewer(palette = "Set3")) / (p2 + scale_fill_brewer(palette = "Set3")) / (p3 + scale_fill_brewer(palette = "Set3")) / (p4 + scale_fill_brewer(palette = "Set3")) + plot_layout(ncol = 1, heights = c(1,1,1,1,1))

#ggsave(file = "./figs/Alluvial_diagrams_v0.pdf", width = 10, height = 12, units = "cm", dpi = 300, scale = 2, device = cairo_pdf)
```

# Data organisation
Removing study with negative values, getting effect sizes from function, 'flipping' effect sizes so that all effect sizes are higher values = individuals do better and learning/memory, shifting negative values to positive as lnRR cannot use negative values, assigining human readable terms, and creating VCV of variance 

```{r}
#this code is to process the data using "Data_raw". Processed data after this step was loaded above
#removing study with negative values as these are unable to be used for lnRR
dat <- droplevels(dat[!dat$First_author == 'Wang',])

# #Getting effect sizes
# effect_size <- effect_set(CC_n = "CC_n", CC_mean = "CC_mean", CC_SD = "CC_SD",
#                           EC_n = "EC_n", EC_mean = "EC_mean" , EC_SD ="EC_SD",
#                           CS_n = "CS_n", CS_mean = "CS_mean", CS_SD = "CS_SD",
#                           ES_n = "ES_n", ES_mean = "ES_mean", ES_SD = "ES_SD",
#                           data = dat)
# #'pure' effect sizes
# effect_size2 <- effect_set2(CC_n = "CC_n", CC_mean = "CC_mean", CC_SD = "CC_SD",
#                           EC_n = "EC_n", EC_mean = "EC_mean" , EC_SD ="EC_SD",
#                           CS_n = "CS_n", CS_mean = "CS_mean", CS_SD = "CS_SD",
#                           ES_n = "ES_n", ES_mean = "ES_mean", ES_SD = "ES_SD",
#                           data = dat) 
#rounding down integer sample sizes 
dat$CC_n <- floor(dat$CC_n)
dat$EC_n <- floor(dat$EC_n)
dat$CS_n <- floor(dat$CS_n)
dat$ES_n <- floor(dat$CS_n)

# 'Focal' effect_size 
effect_size <- with(dat, mapply(effect_set, 
                      CC_n ,
                      CC_mean, 
                      CC_SD,
                      EC_n, 
                      EC_mean, 
                      EC_SD,
                      CS_n, 
                      CS_mean, 
                      CS_SD,
                      ES_n, 
                      ES_mean, 
                      ES_SD,
                      percent = Response_percent,
                      SIMPLIFY = FALSE))
effect_size <- map_dfr(effect_size, I)

# 'Pairwise' effect size

 effect_size2 <- with(dat, mapply(effect_set2, 
                      CC_n ,
                      CC_mean, 
                      CC_SD,
                      EC_n, 
                      EC_mean, 
                      EC_SD,
                      CS_n, 
                      CS_mean, 
                      CS_SD,
                      ES_n, 
                      ES_mean, 
                      ES_SD,
                      percent = Response_percent,
                      SIMPLIFY = FALSE))
effect_size2 <- map_dfr(effect_size2, I)

#Removing missing effect sizes
dim(dat)
full_info <- which(complete.cases(effect_size) == TRUE)

# adding effect sizes as column
dat <- bind_cols(dat, effect_size, effect_size2)
dat <- dat[full_info, ]

#Flipping 'lower is better' effect sizes
#flipping lnRR for values where higher = worse
dat$lnRR_Ea <- ifelse(dat$Response_direction == 2, dat$lnRR_E*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_E))
# currently NAswhich causes error
dat$lnRR_Sa  <- ifelse(dat$Response_direction == 2, dat$lnRR_S*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_S)) # currently NAswhich causes error
dat$lnRR_ESa <-  ifelse(dat$Response_direction == 2, dat$lnRR_ES*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_ES)) # currently NAswhich causes error
#flipping 'pure effect sizes'
dat$lnRR_E2a <- ifelse(dat$Response_direction == 2, dat$lnRR_E2*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_E2)) # currently NAswhich causes error
dat$lnRR_S2a  <- ifelse(dat$Response_direction == 2, dat$lnRR_S2*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_S2)) # currently NAswhich causes error
dat$lnRR_ES2a <-  ifelse(dat$Response_direction == 2, dat$lnRR_ES2*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_ES2)) # currently NAswhich causes error
dat$lnRR_E3a <-  ifelse(dat$Response_direction == 2, dat$lnRR_E3*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_E3)) # currently NAswhich causes error
dat$lnRR_S3a <-  ifelse(dat$Response_direction == 2, dat$lnRR_S3*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$lnRR_S3)) # currently NAswhich causes error

#flipping SMD
dat$SMD_Ea <- ifelse(dat$Response_direction == 2, dat$SMD_E*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$SMD_E)) # currently NAswhich causes error
dat$SMD_Sa  <- ifelse(dat$Response_direction == 2, dat$SMD_S*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$SMD_S)) # currently NAswhich causes error
dat$SMD_ESa <-  ifelse(dat$Response_direction == 2, dat$SMD_ES*-1,ifelse(is.na(dat$Response_direction) == TRUE, NA, dat$SMD_ES))

#assigning human readable terms
dat <- dat %>% mutate(Type_assay = case_when(Type_assay == 1 ~ "Habituation",
                                                Type_assay == 2 ~ "Conditioning",
                                                Type_assay == 3 ~ "Recognition", 
                                                Type_assay == 4 ~ "Unclear"),
                      Learning_vs_memory = case_when(Learning_vs_memory == 1 ~ "Learning",
                                                     Learning_vs_memory == 2 ~ "Memory", 
                                                     Learning_vs_memory == 3 ~ "Habituation"),
                      Type_reinforcement = case_when(Type_reinforcement== 1 ~"Appetitive",
                                                         Type_reinforcement== 2 ~ "Aversive",
                                                         Type_reinforcement== 3 ~ "Not applicable",
                                                         Type_reinforcement== 4 ~ "Unclear"),
                      Type_stress_exposure = case_when(Type_stress_exposure == 1 ~ "Density",
                                                       Type_stress_exposure == 2 ~ "Scent",
                                                       Type_stress_exposure == 3 ~ "Shock",
                                                       Type_stress_exposure == 4 ~ "Exertion",
                                                       Type_stress_exposure == 5 ~ "Restraint",
                                                       Type_stress_exposure == 6 ~ "MS",
                                                       Type_stress_exposure == 7 ~ "Circadian rhythm",
                                                       Type_stress_exposure == 8 ~ "Noise",
                                                       Type_stress_exposure == 9 ~ "Other",
                                                       Type_stress_exposure == 10 ~ "Combination",
                                                       Type_stress_exposure == 11 ~ "unclear"), 
                      Age_stress_exposure = case_when(Age_stress_exposure == 1 ~ "Prenatal",
                                                      Age_stress_exposure == 2 ~ "Early postnatal",
                                                      Age_stress_exposure == 3 ~ "Adolescent",
                                                      Age_stress_exposure == 4 ~ "Adult",
                                                      Age_stress_exposure == 5 ~ "Unclear"),
                      Stress_duration = case_when(Stress_duration == 1 ~ "Acute",
                                                  Stress_duration == 2 ~ "Chronic",
                                                  Stress_duration == 3 ~ "Intermittent",
                                                  Stress_duration == 4 ~ "Unclear"),
                      EE_social = case_when(EE_social == 1 ~ "Social",
                                            EE_social== 2 ~ "Non-social", 
                                            EE_social == 3 ~ "Unclear"), 
                      EE_exercise = case_when(EE_exercise == 1 ~ "Exercise", 
                                              EE_exercise == 2 ~ "No exercise"),
                      Age_EE_exposure = case_when(Age_EE_exposure == 1 ~ "Prenatal", 
                                                  Age_EE_exposure == 2 ~ "Early postnatal",
                                                  Age_EE_exposure == 3 ~ "Adolescent", 
                                                  Age_EE_exposure == 4 ~ "Adult",
                                                  Age_EE_exposure == 5 ~ "Unclear"),
                      Exposure_order = case_when(Exposure_order == 1 ~ "Stress first",
                                                      Exposure_order == 2 ~ "Enrichment first",
                                                      Exposure_order == 3 ~ "Concurrently", 
                                                      Exposure_order == 4 ~ "Unclear"),
                      Age_assay = case_when(Age_assay == 1 ~ "Early postnatal",
                                            Age_assay == 2 ~ "Adolescent",
                                            Age_assay == 3 ~ "Adult", 
                                            Age_assay == 4 ~ "Unclear"),
                      Sex = case_when(Sex == 1 ~ "Female", 
                                      Sex == 2 ~ "Male", 
                                      Sex == 3 ~ "Mixed", 
                                      Sex == 4 ~ "Unclear"),
                      Type_EE_exposure = case_when(Type_EE_exposure == 1 ~ "Nesting material",
                                                      Type_EE_exposure == 2 ~ "Objects",
                                                      Type_EE_exposure == 3 ~ "Cage complexity", 
                                                      Type_EE_exposure == 4 ~ "Wheel/trademill",
                                                      Type_EE_exposure == 5 ~ "Combination",
                                                      Type_EE_exposure == 6 ~ "Other", 
                                                      Type_EE_exposure == 7 ~ "Unclear"))

#making variance VCVs
VCV_E <- impute_covariance_matrix(vi = dat$lnRRV_E, cluster = dat$Study_ID, r = 0.5)
VCV_S <- impute_covariance_matrix(vi = dat$lnRRV_S, cluster = dat$Study_ID, r = 0.5)
VCV_ES <- impute_covariance_matrix(vi = dat$lnRRV_ES, cluster = dat$Study_ID, r = 0.5)

VCV_Ea <- impute_covariance_matrix(vi = dat$SMDV_E, cluster = dat$Study_ID, r = 0.5)
VCV_Sa <- impute_covariance_matrix(vi = dat$SMDV_S, cluster = dat$Study_ID, r = 0.5)
VCV_ESa <- impute_covariance_matrix(vi = dat$SMDV_ES, cluster = dat$Study_ID, r = 0.5)

#write.csv(dat, file = here("Data", 'Data_processed.csv'), row.names = TRUE)
```

# Modelling with lnRR

## Environmental enrichment

### Meta-analysis
Learning and memory are significantly improved when housed with environmnetal enrichment

``` {r, message = FALSE, warning = FALSE}
#dat <- read_csv(here("Data","Data_processed.csv"))

mod_E0 <- rma.mv(yi = lnRR_Ea, V = VCV_E, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain),
                 test = "t",
                 data = dat)
summary(mod_E0) 
i2_ml(mod_E0) 

orchard_plot(mod_E0, mod = "Int", xlab = "lnRR", alpha=0.4) +  # Orchard plot 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5)+ # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2)+ # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_colour_manual(values = "darkorange")+ # change colours
  scale_fill_manual(values="darkorange")+ 
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 
```

### Meta-regression: uni-moderator {.tabset}

#### Type of assay
The type of learning/memory response

``` {r, message = FALSE, warning = FALSE,  fig.width=10, fig.height=7}
dat1 <- filter(dat, Type_assay %in% c("Recognition", "Habituation", "Conditioning"))
VCV_E1 <- impute_covariance_matrix(vi = dat1$lnRRV_E, cluster = dat$Study_ID, r = 0.5)

mod_E1 <- rma.mv(yi = lnRR_Ea, V = VCV_E1, mod = ~Type_assay-1, random = list(~1|Study_ID,
                                                                                  ~1|ES_ID,
                                                                                  ~1|Strain),
                 test = "t",
                 data = dat1)

summary(mod_E1)
r2_ml(mod_E1) 

Learning_E <- orchard_plot(mod_E1, mod = "Type_assay", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Learning_E
```

#### Learning vs Memory
Is the assay broadly measuring learning or memory? 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
mod_E2 <-  rma.mv(yi = lnRR_Ea, V = VCV_E, mod = ~Learning_vs_memory-1, random = list(~1|Study_ID,
                                                                                        ~1|ES_ID,
                                                                                        ~1|Strain),
                  test = "t",
                  data = dat)

summary(mod_E2) 
r2_ml(mod_E2) 

LvsM_E<- orchard_plot(mod_E2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

LvsM_E
```

#### Type of reinforcement 
The type of cue used 

``` {r, message = FALSE, warning = FALSE,  fig.width=10, fig.height=7}

dat2 <- filter(dat, Type_reinforcement %in% c("Appetitive", "Aversive", "Not applicable"))
VCV_E2 <- impute_covariance_matrix(vi = dat2$lnRRV_E, cluster = dat2$Study_ID, r = 0.5)
mod_E3 <- rma.mv(yi = lnRR_Ea, V = VCV_E2, mod = ~ Type_reinforcement-1, random = list(~1|Study_ID,
                                                                                            ~1|ES_ID,
                                                                                            ~1|Strain),
                 test = "t",
                 data = dat2)

summary(mod_E3)
r2_ml(mod_E3) 

Reinforcement_E <-orchard_plot(mod_E3, mod = "Type_reinforcement", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Reinforcement_E
````

#### Social enrichment
Does EE also include a manipulation of social environment? Note that we excluded any studies that exclusively used social enrichment.s

``` {r, message = FALSE, warning = FALSE,  fig.width=10, fig.height=7}
dat5 <- filter(dat, EE_social %in% c("Social", "Non-social"))
VCV_E5 <- impute_covariance_matrix(vi = dat5$lnRRV_E, cluster = dat$Study_ID, r = 0.5)
  
mod_E4<- rma.mv(yi = lnRR_Ea, V = VCV_E5, mod = ~EE_social-1, random = list(~1|Study_ID, 
                                                                             ~1|ES_ID,
                                                                             ~1|Strain),
                test = "t",data = dat5)

summary(mod_E4)
r2_ml(mod_E4) 

Social_E <-orchard_plot(mod_E4, mod = "EE_social", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Social_E 
```

#### Exercise enrichment
Does the form of enrichment include exercise through a running wheel or treadmill?

``` {r, message = FALSE, warning = FALSE,  fig.width=10, fig.height=7}
mod_E5<- rma.mv(yi = lnRR_Ea, V = VCV_E, mod = ~EE_exercise-1, random = list(~1|Study_ID,
                                                                               ~1|ES_ID,
                                                                               ~1|Strain),
                test = "t",
                data = dat)

summary(mod_E5)
r2_ml(mod_E5) 
 
Exercise_E <-orchard_plot(mod_E5, mod = "EE_exercise", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Exercise_E
```

#### Age of enrichment
The age at which the individuals were exposed to environmental enrichment.
``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
dat6 <- filter(dat, Age_EE_exposure %in% c("Adult", "Adolescent"))
VCV_E6 <- impute_covariance_matrix(vi = dat6$lnRRV_E, cluster = dat6$Study_ID, r = 0.5)


mod_E6 <- rma.mv(yi = lnRR_Ea, V = VCV_E6, mod = ~Age_EE_exposure-1, random = list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                 test = "t",
                 data = dat6)

summary(mod_E6) 
r2_ml(mod_E6) 

Age_E<- orchard_plot(mod_E6, mod = "Age_EE_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10))  

Age_E
```

###  Meta-regression: multi-moderator model

``` {r, message = FALSE, warning = FALSE, eval = FALSE}

# filter data so that all K < 5 are removed
dat_Efm <- dat %>%
  filter(Type_assay %in% c("Recognition", "Habituation", "Conditioning"),
         Type_reinforcement %in% c("Appetitive", "Aversive", "Not applicable"),
         EE_social %in% c("Social", "Non-social"), 
         Age_EE_exposure %in% c("Adult", "Adolescent"))

VCV_Efm <- impute_covariance_matrix(vi = dat_Efm$lnRRV_E, cluster = dat_Efm$Study_ID, r = 0.5)
                 
mod_Efm <- rma.mv(yi = lnRR_Sa, V = VCV_Efm, mod = ~Type_assay-1 + Learning_vs_memory + Type_reinforcement + EE_social + EE_exercise + Age_EE_exposure , random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_Efm)
#summary(mod_Efm)
#r2_ml(mod_Efm) 

res_Efm <- dredge(mod_Efm, trace=2)
saveRDS(res_Efm, file = here("Rdata", "res_Efm.rds"))
# also saving the full model and data
saveRDS(mod_Efm, file = here("Rdata", "mod_Efm.rds"))
saveRDS(dat_Efm, file = here("Rdata", "dat_Efm.rds"))
```

```{r, message = FALSE, warning = FALSE}
dat_Efm <- readRDS(file = here("Rdata", "dat_Efm.rds"))
mod_Efm <- readRDS(file = here("Rdata", "mod_Efm.rds"))
res_Efm <- readRDS(file = here("Rdata", "res_Efm.rds"))
res_Efm2<- subset(res_Efm, delta <= 6, recalc.weights=FALSE)
importance(res_Efm2)
```

### Publication bias & sensitivity analysis

#### Publication bias tests
``` {r, message = FALSE, warning = FALSE}

# funnel plot
Funnel_E<-funnel(mod_Efm, xlab = "lnRR", ylab = "Standard Error")
Funnel_E
#year published was scaled previously  under stress PB

dat_Efm$sqrt_inv_e_n <- with(dat_Efm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

PB_MR_E<- rma.mv(lnRR_Sa, lnRRV_S, mods = ~1 + sqrt_inv_e_n +  Learning_vs_memory + Year_published + Type_assay + Type_reinforcement + EE_social + EE_exercise + Age_stress_exposure, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain), method = "REML", test = "t", 
    data = dat_Efm)

estimates_PB_MR_E<- estimates.CI(PB_MR_E)
estimates_PB_MR_E

#no effect of inv_effective sample size or year published
```

#### Leave-one-out analysis
``` {r, message = FALSE, warning = FALSE, eval = FALSE}
dat$Study_ID <- as.factor(dat$Study_ID)

LeaveOneOut_effectsize <- list()
for(i in 1:length(levels(dat$Study_ID))){
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Ea, V = lnRRV_E, 
                                        random = list(~1 | Study_ID,~1| ES_ID, ~1 | Strain), 
                                        method = "REML", data = dat[dat$Study_ID
                                                                    != levels(dat$Study_ID)[i], ])}


# writing function for extracting est, ci.lb, and ci.ub from all models
est.func <- function(mod_E0){
  df <- data.frame(est = mod_E0$b, lower = mod_E0$ci.lb, upper = mod_E0$ci.ub)
  return(df)
}


#using dplyr to form data frame
MA_CVR_E <- lapply(LeaveOneOut_effectsize, function(x) est.func(x))%>% bind_rows %>% mutate(left_out = levels(dat$Study_ID))


saveRDS(MA_CVR_E,file = here("Rdata", "MA_CVR_E.rds"))
```

``` {r, message = FALSE, warning = FALSE}
#telling ggplot to stop reordering factors
MA_CVR_E <- readRDS(file = here("Rdata", "MA_CVR_E.rds"))

MA_CVR_E$left_out<- as.factor(MA_CVR_E$left_out)
MA_CVR_E$left_out<-factor(MA_CVR_E$left_out, levels = MA_CVR_E$left_out)


#plotting
leaveoneout_E <- ggplot(MA_CVR_E) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_E0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") + 
  ylab("lnRR, 95% CI") + 
  coord_flip() +
  theme(panel.grid.minor = element_blank())+
  theme_bw() + theme(panel.grid.major = element_blank()) +
  theme(panel.grid.minor.x = element_blank() ) +
  theme(axis.text.y = element_text(size = 6))

leaveoneout_E

dat$Study_ID <- as.integer(dat$Study_ID)
```

## Stress

### Intercept model
Learning and memory are significantly reduced due to stress. 
High heterogeneity

``` {r, message = FALSE, warning = FALSE}
mod_S0 <- rma.mv(yi = lnRR_Sa, V = VCV_S, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain),
                 test = "t", 
                 data = dat)

summary(mod_S0) 
i2_ml(mod_S0) 

orchard_plot(mod_S0, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 
```

## Meta-regression

### Uni-moderator metaregression {.tabset}

#### Type of assay
The type of learning/memory response 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
dat$Type_assay<-as.factor(dat$Type_assay)

VCV_S1 <- impute_covariance_matrix(vi = dat1$lnRRV_S, cluster = dat$Study_ID, r = 0.5)


mod_S1 <- rma.mv(yi = lnRR_Sa, V = VCV_S1, mod = ~Type_assay-1, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                 test = "t",
                 data = dat1)

summary(mod_S1)
r2_ml(mod_S1) 

Learning_S <-orchard_plot(mod_S1, mod = "Type_assay", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Learning_S
```

#### Learning vs Memory
Is the assay broadly measuring learning or memory? 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}

mod_S2 <-  rma.mv(yi = lnRR_Sa, V = VCV_S, mod = ~Learning_vs_memory-1, random = list(~1|Study_ID,
                                                                                        ~1|ES_ID,
                                                                                        ~1|Strain),
                  test = "t",
                  data = dat)

summary(mod_S2) 
r2_ml(mod_S2) 

LvsM_S <- orchard_plot(mod_S2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

LvsM_S 
```

#### Type of reinforcement
The type of cue used 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}

VCV_S2 <- impute_covariance_matrix(vi = dat2$lnRRV_S, cluster = dat$Study_ID, r = 0.5)

mod_S3 <- rma.mv(yi = lnRR_Sa, V = VCV_S2, mod = ~ Type_reinforcement-1, random = list(~1|Study_ID,
                                                                                            ~1|ES_ID,
                                                                                            ~1|Strain),
                 test = "t",
                 data = dat2)

summary(mod_S3)
r2_ml(mod_S3) 

Reinforcement_S <-orchard_plot(mod_S3, mod = "Type_reinforcement", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Reinforcement_S
````

#### Type of stress
The type of stress manipulation 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
dat3 <- filter(dat, Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"))
VCV_S3 <- impute_covariance_matrix(vi = dat3$lnRRV_S, cluster = dat3$Study_ID, r = 0.5)

mod_S4 <- rma.mv(yi = lnRR_Sa, V = VCV_S3, mod = ~Type_stress_exposure-1, random = list(~1|Study_ID,
                                                                                         ~1|ES_ID,
                                                                                         ~1|Strain),
                 test = "t",
                 data = dat3)
summary(mod_S4) 
r2_ml(mod_S4)

Stressor<- orchard_plot(mod_S4, mod = "Type_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Stressor
````

#### Age of stress
``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_S3a <- impute_covariance_matrix(vi = dat$lnRRV_S, cluster = dat$Study_ID, r = 0.5)

mod_S5 <-rma.mv(yi = lnRR_Sa, V = VCV_S3a, mod = ~Age_stress_exposure-1, random = list(~1|Study_ID,
                                                                                       ~1|ES_ID,
                                                                                       ~1|Strain),
                test = "t",
                data = dat)
summary(mod_S5) 
r2_ml(mod_S5) 

Age_S <- orchard_plot(mod_S5, mod = "Age_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Age_S 
```

#### Stess duration
How long was the stress applied for (chronic = every day for 7 days or more)?
This has the highest marginal R2

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
dat4 <- filter(dat, Stress_duration %in% c("Chronic", "Acute"))
VCV_S4 <- impute_covariance_matrix(vi = dat4$lnRRV_S, cluster = dat4$Study_ID, r = 0.5)

mod_S6 <-rma.mv(yi = lnRR_Sa, V = VCV_S4, mod = ~Stress_duration-1, random = list(~1|Study_ID,
                                                                                   ~1|ES_ID,
                                                                                   ~1|Strain),
                test = "t",
                data = dat4)
summary(mod_S6) 
r2_ml(mod_S6) 

Duration_S <- orchard_plot(mod_S6, mod = "Stress_duration", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Duration_S 
```

###  Meta-regression: multi-moderator model
``` {r, message = FALSE, warning = FALSE, eval = FALSE}

#selecting moderator levels with k >=5
dat_Sfm <- dat %>%
  filter(Type_assay %in% c("Recognition", "Habituation", "Conditioning"),
         Type_reinforcement %in% c("Appetitive", "Aversive", "Not applicable"),
         Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"),
         Stress_duration %in% c("Chronic", "Acute"))

VCV_Sfm <- impute_covariance_matrix(vi = dat_Sfm$lnRRV_E, cluster = dat_Sfm$Study_ID, r = 0.5)
                 
mod_Sfm <- rma.mv(yi = lnRR_Sa, V = VCV_Sfm, mod = ~ Type_assay -1 + Learning_vs_memory + Type_reinforcement + Type_stress_exposure + Age_stress_exposure + Stress_duration, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_Sfm)
#summary(mod_Sfm)
#r2_ml(mod_Sfm) 

res_Sfm <- dredge(mod_Sfm, trace=2)
saveRDS(res_Sfm, file = here("Rdata", "res_Sfm.rds"))
# also saving the full model and data
saveRDS(mod_Sfm, file = here("Rdata", "mod_Sfm.rds"))
saveRDS(dat_Sfm, file = here("Rdata", "dat_Sfm.rds"))
```

```{r, message = FALSE, warning = FALSE}
dat_Sfm <- readRDS(file = here("Rdata", "dat_Sfm.rds"))
mod_Sfm <- readRDS(file = here("Rdata", "mod_Sfm.rds"))
res_Sfm <- readRDS(file = here("Rdata", "res_Sfm.rds"))
res_Sfm2<- subset(res_Sfm, delta <= 6, recalc.weights=FALSE)
importance(res_Sfm2) 
```

### Publication bias & sensitivity analysis

#### Publication bias

``` {r, message = FALSE, warning = FALSE}
# funnel plot
Funnel_S <- funnel(mod_Sfm, xlab = "lnRR", ylab = "Standard Error")
Funnel_S

#calculating inv effective sample size for use in full meta-regression
dat_Sfm$sqrt_inv_e_n <- with(dat_Sfm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

#time lag bias and eggers regression
dat_Sfm$c_Year_published <- as.vector(scale(dat_Sfm$Year_published, scale = F))

PB_MR_S<- rma.mv(lnRR_Sa, lnRRV_S, mods = ~1 + sqrt_inv_e_n +  c_Year_published + Type_assay +Learning_vs_memory + Type_reinforcement + Type_stress_exposure + Age_stress_exposure, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain), 
                 method = "REML", 
                 test = "t", 
                 data = dat_Sfm,
                  control=list(optimizer="optim", optmethod="Nelder-Mead"))

estimates_PB_MR_S<- estimates.CI(PB_MR_S)
estimates_PB_MR_S
#no effect of inv_effective sample size or year published
```

#### Leave-one-out sensitivity analysis

``` {r, message = FALSE, warning = FALSE, eval = FALSE}
dat$Study_ID <- as.factor(dat$Study_ID)

LeaveOneOut_effectsize <- list()
for(i in 1:length(levels(dat$Study_ID))){
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Sa, V = lnRRV_S, 
                                        random = list(~1 | Study_ID,~1| ES_ID, ~1 | Strain), 
                                        method = "REML", data = dat[dat$Study_ID
                                                                    != levels(dat$Study_ID)[i], ])}


# writing function for extracting est, ci.lb, and ci.ub from all models
est.func <- function(mod_E0){
  df <- data.frame(est = mod_E0$b, lower = mod_E0$ci.lb, upper = mod_E0$ci.ub)
  return(df)
}


#using dplyr to form data frame
MA_CVR_S <- lapply(LeaveOneOut_effectsize, function(x) est.func(x))%>% bind_rows %>% mutate(left_out = levels(dat$Study_ID))

saveRDS(MA_CVR_S,file = here("Rdata", "MA_CVR_S.rds"))

```

``` {r, message = FALSE, warning = FALSE}
MA_CVR_S <- readRDS(file = here("Rdata", "MA_CVR_S.rds"))

#telling ggplot to stop reordering factors
MA_CVR_S$left_out<- as.factor(MA_CVR_S$left_out)
MA_CVR_S$left_out<-factor(MA_CVR_S$left_out, levels = MA_CVR_S$left_out)

#plotting
leaveoneout_S <- ggplot(MA_CVR_S) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_S0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_S0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_S0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") + 
  ylab("lnRR, 95% CI") + 
  coord_flip() +
  theme(panel.grid.minor = element_blank())+
  theme_bw() + theme(panel.grid.major = element_blank()) +
  theme(panel.grid.minor.x = element_blank() ) +
  theme(axis.text.y = element_text(size = 6))

leaveoneout_S

dat$Study_ID <- as.integer(dat$Study_ID)
```


## Interaction of stress and EE

### Intercept
Enriched and stress animals are better at learning and memory. 

``` {r, message = FALSE, warning = FALSE}
mod_ES0 <- rma.mv(yi = lnRR_ESa, V = VCV_ES, random = list(~1|Study_ID,
                                                             ~1|ES_ID,
                                                             ~1|Strain),
                  test = "t", 
                  data = dat)

summary(mod_ES0) 
i2_ml(mod_ES0) 

orchard_plot(mod_ES0, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 
````

#### Intercept with outlier removed

``` {r, message = FALSE, warning = FALSE}

dat_outliers <- dat %>%
  filter(ES_ID != 88)

VCV_ES_outliers <- impute_covariance_matrix(vi = dat_outliers$lnRRV_E, cluster = dat_outliers$Study_ID, r = 0.5)

mod_ESoutlier <- rma.mv(yi = lnRR_ESa, V = VCV_ES_outliers, random = list(~1|Study_ID,
                                                             ~1|ES_ID,
                                                             ~1|Strain),
                  test = "t", 
                  data = dat_outliers)

summary(mod_ESoutlier)

orchard_plot(mod_ESoutlier, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13))   
```

## Meta-regression

### Uni-moderator meta-regression {.tabset}

#### Type of assay
The type of learning/memory response 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_ES1 <- impute_covariance_matrix(vi = dat1$lnRRV_ES, cluster = dat$Study_ID, r = 0.5)

mod_ES1 <- rma.mv(yi = lnRR_ESa, V = VCV_ES1, mod = ~Type_assay-1, random = list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                  test = "t",
                  data = dat1)

summary(mod_ES1)
r2_ml(mod_ES1) 

Learning_ES <- orchard_plot(mod_ES1, mod = "Type_assay", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Learning_ES
```

#### Learning vs Memory
Is the assay broadly measuring learning or memory? 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
mod_ES2 <-  rma.mv(yi = lnRR_ESa, V = VCV_ES, mod = ~Learning_vs_memory-1, random = list(~1|Study_ID, 
                                                                                           ~1|ES_ID,
                                                                                           ~1|Strain),
                   test = "t",
                   data = dat)

summary(mod_ES2) 
r2_ml(mod_ES2) 

LvsM_ES <- orchard_plot(mod_ES2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

LvsM_ES 
```

#### Type of reinforcement
The type of conditioning used

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_ES2 <- impute_covariance_matrix(vi = dat2$lnRRV_ES, cluster = dat2$Study_ID, r = 0.5)

mod_ES3 <- rma.mv(yi = lnRR_ESa, V = VCV_ES2, mod = ~ Type_reinforcement-1, random = list(~1|Study_ID,
                                                                                               ~1|ES_ID,
                                                                                               ~1|Strain),
                  test = "t",
                  data = dat2)

summary(mod_ES3)
r2_ml(mod_ES3) 

Reinforcement_ES <- orchard_plot(mod_ES3, mod = "Type_reinforcement", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Reinforcement_ES 
````

#### Type of stress
The type of stress manipulation 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_ES3 <- impute_covariance_matrix(vi = dat3$lnRRV_ES, cluster = dat3$Study_ID, r = 0.5)
mod_ES4 <- rma.mv(yi = lnRR_ESa, V = VCV_ES3, mod = ~Type_stress_exposure-1, random = list(~1|Study_ID,
                                                                                            ~1|ES_ID,
                                                                                            ~1|Strain),
                  test = "t",
                  data = dat3)
summary(mod_ES4)
r2_ml(mod_ES4)

Stressor_ES <- orchard_plot(mod_ES4, mod = "Type_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7))  

Stressor_ES 
````

#### Age of stress
The age at which the individuals were exposed to the stressor.
``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
mod_ES5 <-rma.mv(yi = lnRR_ESa, V = VCV_ES, mod = ~Age_stress_exposure-1, random = list(~1|Study_ID,
                                                                                          ~1|ES_ID,
                                                                                          ~1|Strain),
                 test = "t",
                 data = dat)
summary(mod_ES5) 
r2_ml(mod_ES5) 

Age_stress_ES<-orchard_plot(mod_ES5, mod = "Age_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Age_stress_ES
```

#### Stress duration
How long was the stress applied for (chronic = every day for 7 days or more)?
This has the highest marginal R2 (currentl nearly 43%) - need to redo without outlier

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_ES4 <- impute_covariance_matrix(vi = dat4$lnRRV_ES, cluster = dat4$Study_ID, r = 0.5)

mod_ES6 <-rma.mv(yi = lnRR_ESa, V = VCV_ES4, mod = ~Stress_duration-1, random = list(~1|Study_ID,
                                                                                      ~1|ES_ID,
                                                                                      ~1|Strain),
                 test = "t",
                 data = dat4)
summary(mod_ES6) 
r2_ml(mod_ES6) 


Duration_ES<- orchard_plot(mod_ES6, mod = "Stress_duration", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Duration_ES
```

#### Social enrichment
Does EE also include a manipulation of social environment (i.e., number of individuals in EE relative to control)? 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_ES5 <- impute_covariance_matrix(vi = dat5$lnRRV_ES, cluster = dat5$Study_ID, r = 0.5)
mod_ES7<- rma.mv(yi = lnRR_ESa, V = VCV_ES5, mod = ~EE_social-1, random = list(~1|Study_ID,
                                                                                ~1|ES_ID,
                                                                                ~1|Strain),
                 test = "t",
                 data = dat5)

summary(mod_ES7)
r2_ml(mod_ES7) 

Social_ES <- orchard_plot(mod_ES7, mod = "EE_social", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Social_ES
```

#### Exercise enrichment
Does the form of enrichment include exercise through a running wheel or treadmill? 

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
mod_ES8<- rma.mv(yi = lnRR_ESa, V = VCV_ES, mod = ~EE_exercise-1, random = list(~1|Study_ID, 
    ~1|ES_ID,
    ~1|Strain),
     test = "t",
     data = dat)

summary(mod_ES8)
r2_ml(mod_ES8) 

Exercise_ES <- orchard_plot(mod_ES8, mod = "EE_exercise", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Exercise_ES
```

#### Order to treatment exposure
What order were animals exposed to stress and EE

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
mod_ES9 <- rma.mv(yi = lnRR_ESa, V = VCV_ES, mod = ~Exposure_order -1, random = list(~1|Study_ID, 
    ~1|ES_ID,
    ~1|Strain),
     test = "t",
     data = dat)

summary(mod_ES9)
r2_ml(mod_ES9)

Order_ES <- orchard_plot(mod_ES9, mod = "Exposure_order", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Order_ES 
```

#### Age of enrichment
What age were individuals exposed to EE
``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_ES6 <- impute_covariance_matrix(vi = dat6$lnRRV_ES, cluster = dat6$Study_ID, r = 0.5)

mod_ES10 <- rma.mv(yi = lnRR_ESa, V = VCV_ES6, mod = ~Age_EE_exposure-1, random = list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                 test = "t",
                 data = dat6)

summary(mod_ES10) 
r2_ml(mod_ES10) 

Age_enrichment_ES <- orchard_plot(mod_ES10, mod = "Age_EE_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Age_enrichment_ES
```

###  Meta-regression: multi-moderator model

``` {r, message = FALSE, warning = FALSE, eval = FALSE}
dat_ESfm <- dat %>%
  filter(Type_assay %in% c("Recognition", "Habituation", "Conditioning"),
         Type_reinforcement %in% c("Appetitive", "Aversive", "Not applicable"),
         EE_social %in% c("Social", "Non-social"),
         Age_EE_exposure %in% c("Adult", "Adolescent"),
         Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"),
         Stress_duration %in% c("Chronic", "Acute"), 
         Age_stress_exposure %in% c("Prenatal", "Early postnatal", "Adult"))

VCV_ESfm <- impute_covariance_matrix(vi = dat_ESfm$lnRRV_ES, cluster = dat_ESfm$Study_ID, r = 0.5)
                 
mod_ESfm <- rma.mv(yi = lnRR_Sa, V = VCV_ESfm, mod = ~Type_assay-1 + Learning_vs_memory + Type_reinforcement + EE_social + EE_exercise + Age_EE_exposure + Type_stress_exposure + Age_stress_exposure + Stress_duration + Exposure_order, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_ESfm)
#summary(mod_ESfm)
#r2_ml(mod_ESfm) 


res_ESfm <- dredge(mod_ESfm, trace=2)
saveRDS(res_ESfm, file = here("Rdata", "res_ESfm.rds"))
# also saving the full model and data
saveRDS(mod_ESfm, file = here("Rdata", "mod_ESfm.rds"))
saveRDS(dat_ESfm, file = here("Rdata", "dat_ESfm.rds"))
```


```{r}
dat_ESfm <- readRDS(file = here("Rdata", "dat_ESfm.rds"))
mod_ESfm <- readRDS(file = here("Rdata", "mod_ESfm.rds"))
res_ESfm <- readRDS(file = here("Rdata", "res_ESfm.rds"))
res_ESfm2<- subset(res_ESfm, delta <= 6, recalc.weights=FALSE)
importance(res_ESfm2) 
```

### Publication bias & sensitivity analysis

#### Publication bias

``` {r}
Funnel_ES<-funnel(mod_ESfm, xlab = "lnRR", ylab = "Standard Error")
Funnel_ES
#year published was scaled previously under stress PB

dat_ESfm$sqrt_inv_e_n <- with(dat_ESfm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

PB_MR_ES<- rma.mv(lnRR_ESa, lnRRV_ES, mods = ~1 + sqrt_inv_e_n +  Year_published + Learning_vs_memory + Type_assay + Type_reinforcement + EE_social + EE_exercise + Age_stress_exposure, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain), method = "REML", test = "t", 
    data = dat_ESfm)

estimates_PB_MR_ES<- estimates.CI(PB_MR_ES)
estimates_PB_MR_ES

#no effect of inv_effective sample size or year published
```

#### Leave-one-out analysis

``` {r, message = FALSE, warning = FALSE, eval = FALSE}
dat$Study_ID <- as.factor(dat$Study_ID)

LeaveOneOut_effectsize <- list()
for(i in 1:length(levels(dat$Study_ID))){
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Ea, V = lnRRV_E, 
                                        random = list(~1 | Study_ID,~1| ES_ID, ~1 | Strain), 
                                        method = "REML", data = dat[dat$Study_ID
                                                                    != levels(dat$Study_ID)[i], ])}


# writing function for extracting est, ci.lb, and ci.ub from all models
est.func <- function(mod_E0){
  df <- data.frame(est = mod_E0$b, lower = mod_E0$ci.lb, upper = mod_E0$ci.ub)
  return(df)
}


#using dplyr to form data frame
MA_CVR_ES <- lapply(LeaveOneOut_effectsize, function(x) est.func(x))%>% bind_rows %>% mutate(left_out = levels(dat$Study_ID))

saveRDS(MA_CVR_ES, ,file = here("Rdata", "MA_CVR_ES.rds"))

```

``` {r, message = FALSE, warning = FALSE}
MA_CVR_ES<- readRDS(here("Rdata", "MA_CVR_ES.rds"))

#telling ggplot to stop reordering factors
MA_CVR_ES$left_out<- as.factor(MA_CVR_ES$left_out)
MA_CVR_ES$left_out<-factor(MA_CVR_ES$left_out, levels = MA_CVR_ES$left_out)

#plotting
leaveoneout_ES <- ggplot(MA_CVR_ES) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_E0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") + 
  ylab("lnRR, 95% CI") + 
  coord_flip() +
  theme(panel.grid.minor = element_blank())+
  theme_bw() + theme(panel.grid.major = element_blank()) +
  theme(panel.grid.minor.x = element_blank() ) +
  theme(axis.text.y = element_text(size = 6))

leaveoneout_ES

dat$Study_ID <- as.integer(dat$Study_ID)
```

### Combined orchard plot

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
mod_list1 <- list(mod_E0, mod_S0, mod_ES0)

mod_res1 <- lapply(mod_list1, function(x) mod_results(x, mod = "Int"))

merged1 <- submerge(mod_res1[[3]], mod_res1[[2]],  mod_res1[[1]], mix = T)
merged1$mod_table$name <- factor(merged1$mod_table$name, levels = c("Intrcpt1", 
    "Intrcpt2", "Intrcpt3"), 
    labels = rev(c("Enrichment ME", "Stress ME", "Interaction")))

merged1$data$moderator <- factor(merged1$data$moderator, levels = c("Intrcpt1", 
    "Intrcpt2", "Intrcpt3"), 
    labels = rev(c("Enrichment ME", "Stress ME", "Interaction")))

orchard1<- orchard_plot(merged1, mod = "Int", xlab = "lnRR", angle = 0) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals 
  xlim(-2,4.5) +
  geom_point(aes(fill = name),  size = 4, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling +
  scale_colour_manual(values = c("#00AEEF","#00A651","#ED1C24"))+ # change colours
  scale_fill_manual(values=c("#00AEEF","#00A651","#ED1C24"))+
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

orchard1
```

## 'Pairwise' effect sizes {.tabset}

### Enrichment relative to control

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}

VCV_E20 <- impute_covariance_matrix(vi = dat$lnRRV_E2, cluster = dat$Study_ID, r = 0.5)

#Model doesn't converge with VCV
mod_E20 <- rma.mv(yi = lnRR_E2a, V = VCV_E20, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                 test = "t", 
                 data = dat, 
                 control=list(optimizer="optim", optmethod="Nelder-Mead"))

summary(mod_E20) 
i2_ml(mod_E20) 

orchard_plot(mod_E20, mod = "Int", xlab = "lnRR")
```

### Stress relative to control

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_S20 <- impute_covariance_matrix(vi = dat$lnRRV_S2, cluster = dat$Study_ID, r = 0.5)

mod_S20 <- rma.mv(yi = lnRR_S2a, V = VCV_S20, random = list(~1|Study_ID, 
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                 test = "t",
                 data = dat)

summary(mod_S20) 
i2_ml(mod_S20) 
 
orchard_plot(mod_S20, mod = "Int", xlab = "lnRR")
```

### Enrichment + stress relative to control

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_ES20 <- impute_covariance_matrix(vi = dat$lnRRV_ES2, cluster = dat$Study_ID, r = 0.5)

mod_ES20 <- rma.mv(yi = lnRR_ES2a, V = VCV_ES20, random = list(~1|Study_ID,
                                                                ~ 1|Strain,
                                                                ~1|ES_ID),
                 test = "t",
                 data = dat)
summary(mod_ES20) 
i2_ml(mod_ES20) 

orchard_plot(mod_ES20, mod = "Int", xlab = "lnRR")
```

### Enrichment + stress relative to stress

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_E30 <- impute_covariance_matrix(vi = dat$lnRRV_E3, cluster = dat$Study_ID, r = 0.5)

mod_E30 <- rma.mv(yi = lnRR_E3a, V = VCV_E30, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                  test = "t",
                  data = dat)
summary(mod_E30) 
i2_ml(mod_E30) 

orchard_plot(mod_E30, mod = "Int", xlab = "lnRR")
```

### Enrichment + stress relative to enrichment

``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}
VCV_S30 <- impute_covariance_matrix(vi = dat$lnRRV_S3, cluster = dat$Study_ID, r = 0.5)

mod_S30 <- rma.mv(yi = lnRR_S3a, V = VCV_S30, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                  test = "t",
                  data = dat,
                   control=list(optimizer="optim", optmethod="Nelder-Mead"))
summary(mod_S30) 
i2_ml(mod_S30) 

orchard_plot(mod_S30, mod = "Int", xlab = "lnRR")
```

## Combined orchard plot
``` {r, message = FALSE, warning = FALSE, fig.width=10, fig.height=7}

mod_list2 <- list(mod_S30, mod_E30, mod_ES20, mod_S20, mod_E20) #rearranged the order so that it matches intext results

mod_res2 <- lapply(mod_list2, function(x) mod_results(x, mod = "Int"))

merged2 <- submerge(mod_res2[[1]], mod_res2[[2]],  mod_res2[[3]], mod_res2[[4]],  mod_res2[[5]], mix = T)

merged2$mod_table$name <- factor(merged2$mod_table$name, levels = c("Intrcpt1", 
    "Intrcpt2", "Intrcpt3", "Intrcpt4", "Intrcpt5"), 
    labels = rev(c("EC/CC", "CS/CC", "ES/CC", "ES/CS", "ES/EC")))

merged2$data$moderator <- factor(merged2$data$moderator, levels = c("Intrcpt1", 
    "Intrcpt2", "Intrcpt3", "Intrcpt4", "Intrcpt5"), 
    labels = rev(c("EC/CC", "CS/CC", "ES/CC", "ES/CS", "ES/EC")))

orchard2 <- orchard_plot(merged2, mod = "Int", xlab = "lnRR", angle = 0) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals 
  xlim(-2,4.5) +
  geom_point(aes(fill = name),  size = 4, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  scale_colour_manual(values = c("#7B81BE","#D7DF23","#F37158","#75CBF2","#97D2B4"))+ # change colours
  scale_fill_manual(values=c("#7B81BE","#D7DF23","#F37158","#75CBF2","#97D2B4"))+
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

orchard2
```

## Panel of 'focal' ES and 'pairwise' ES orchard plots
``` {r, message = FALSE, warning = FALSE}


p1 <- orchard1 + orchard2 +  plot_annotation(tag_levels = 'A')
p1

#saved as PDF: 6 x 15 inches
```


## Panel of meta-regressions {.tabset}

### Environmental enrichment
``` {r, message = FALSE, warning = FALSE,  fig.width=10, fig.height=7}
#Enrichment
E_mod <- (LvsM_E + Learning_E + Reinforcement_E)/ (Age_E + Exercise_E + Social_E) +  plot_annotation(tag_levels = 'A')

E_mod
#saved as pdf 10 x 15 inches
```

### Stress
``` {r, message = FALSE, warning = FALSE,  fig.width=10, fig.height=7}
S_mod <- (LvsM_S + Learning_S + Reinforcement_S) / (Age_S + Stressor + Duration_S) + plot_annotation(tag_levels = 'A')

S_mod
#saved as pdf 10 x 15 inches
```

### Interaction
``` {r, message = FALSE, warning = FALSE,  fig.width=10, fig.height=7}
ES_mod <- plot_grid(LvsM_ES, Learning_ES, Reinforcement_ES, Age_enrichment_ES, Age_stress_ES, Order_ES, Exercise_ES, Social_ES, Stressor_ES, Duration_ES,
  labels = "AUTO", ncol = 5)

ES_mod
#saved as 10 x 20 inches
```

## Panel of funnel plots
``` {r, message = FALSE, warning = FALSE,  fig.width=12, fig.height=6,echo = FALSE}
# EE

pdf(NULL)
dev.control(displaylist="enable")
par(mar=c(4,4,0.1,0))
funnel(mod_Sfm, xlab = "lnRR", ylab = "Standard Error",
        xlim = c(-2,2),
        ylim = c(0,1.05))
A <- recordPlot()
invisible(dev.off())

# Stress

pdf(NULL)
dev.control(displaylist="enable")
par(mar=c(4,4,0.1,0))
funnel(mod_Sfm, xlab = "lnRR", ylab = "Standard Error",
        xlim = c(-2,2),
        ylim = c(0,1.05))
B <- recordPlot()
invisible(dev.off())

# Interaction
pdf(NULL)
dev.control(displaylist="enable")
par(mar=c(4,4,0.1,0))
funnel(mod_ESfm, xlab = "lnRR", ylab = "Standard Error",
        xlim = c(-2,2),
        ylim = c(0,1.05))
C <- recordPlot()
invisible(dev.off())

# putting together
funnels <- (ggdraw(A) + ggdraw(B) + ggdraw(C)) + plot_annotation(tag_levels = 'A')
funnels


png(file = here("figs", "funnels.png"))

dev.off()

```

# Modelling with SMD

## Environmental Enrichment

### Intercept model

``` {r, message = FALSE, warning = FALSE}

# TODO 
#Why does the funnel plot look so strange and why is I2 higher?

mod_E0a <- rma.mv(yi = SMD_Ea, V = VCV_Ea, random = list(~1|Study_ID,
                                                         ~1|ES_ID, 
                                                         ~1|Strain),
                 test = "t",
                 data = dat)
summary(mod_E0a)
i2_ml(mod_E0a)
funnel(mod_E0a, yaxis="seinv")

# VCV matrix
VCV_Efm <- impute_covariance_matrix(vi = dat_Efm$lnRRV_E, cluster = dat_Efm$Study_ID, r = 0.5)

mod_E0b <- rma.mv(yi = SMD_Ea, V = VCV_Efm, mod = ~Type_assay-1 + Learning_vs_memory-1 + Type_reinforcement-1 + EE_social-1 + EE_exercise-1 + Age_EE_exposure-1, random = list(~1|Study_ID,~1|ES_ID,~1|Strain),
                 test = "t",
                 data = dat_Efm)

funnel(mod_E0b, yaxis="seinv")
```

## Stress

### Intercept model

``` {r, message = FALSE, warning = FALSE}
mod_S0a <- rma.mv(yi = SMD_Sa, V = VCV_Sa, random = list(~1|Study_ID,
                                                         ~1|ES_ID,
                                                         ~1|Strain),
                test = "t",
                data = dat)
summary(mod_S0a) 
i2_ml(mod_S0a) 
funnel(mod_S0a, yaxis="seinv")

# VCV matrix
VCV_Sfm <- impute_covariance_matrix(vi = dat_Sfm$lnRRV_E, cluster = dat_Sfm$Study_ID, r = 0.5)

#assessing funnel on model model
mod_S0b <- rma.mv(yi = SMD_Sa, V = VCV_Sfm, mod = ~ Type_assay -1 + Learning_vs_memory + Type_reinforcement + Type_stress_exposure + Age_stress_exposure + Stress_duration, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_Sfm)
funnel(mod_S0b, yaxis="seinv")
```

## Interaction

### Intercept model

``` {r, message = FALSE, warning = FALSE}
mod_ES0a <- rma.mv(yi = SMD_ESa, V = VCV_ESa, random = list(~1|Study_ID,
                                                            ~1|ES_ID,
                                                            ~1|Strain),
                  test = "t",
                  data = dat)
summary(mod_ES0a)
i2_ml(mod_ES0a) 
funnel(mod_ES0a, yaxis="seinv")

# VCV matrix
VCV_ESfm <- impute_covariance_matrix(vi = dat_ESfm$lnRRV_E, cluster = dat_ESfm$Study_ID, r = 0.5)

mod_ES0b <- rma.mv(yi = SMD_Sa, V = VCV_ESfm, mod = ~Type_assay-1 + Learning_vs_memory + Type_reinforcement + EE_social + EE_exercise + Age_EE_exposure + Type_stress_exposure + Age_stress_exposure + Stress_duration + Exposure_order, random =   list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                    test = "t",
                 data = dat_ESfm)

funnel(mod_ES0b, yaxis="seinv")
```
